net: wangxun: txgbe: support wangxun 10GbE driver

add support for the Wangxun 10GbE driver; source files and
functions are the same as the Wangxun out-of-box driver,
release version txgbe-1.3.5.1.

Signed-off-by: Duanqiang Wen <duanqiangwen@net-swift.com>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
This commit is contained in:
Duanqiang Wen 2024-04-12 14:43:38 +08:00 committed by Jianping Liu
parent cb52153887
commit b38c5bb0b9
37 changed files with 59743 additions and 0 deletions

View File

@ -29,4 +29,18 @@ config NGBE
To compile this driver as a module, choose M here. The module
will be called ngbe.
config TXGBE
	tristate "Wangxun(R) 10GbE PCI Express adapters support"
	depends on PCI
	select HWMON if TXGBE=y
	help
	  This driver supports Wangxun(R) 10GbE PCI Express family of
	  adapters.

	  More specific information on configuring the driver is in
	  <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.

	  To compile this driver as a module, choose M here. The module
	  will be called txgbe.

endif # NET_VENDOR_WANGXUN

View File

@ -4,3 +4,4 @@
#
obj-$(CONFIG_NGBE) += ngbe/
obj-$(CONFIG_TXGBE) += txgbe/

View File

@ -0,0 +1,28 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
#
# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
#

obj-$(CONFIG_TXGBE) += txgbe.o

# Object list for the txgbe module; order kept as in the out-of-box release.
txgbe-objs := txgbe_main.o \
	      txgbe_hw.o \
	      txgbe_phy.o \
	      txgbe_ethtool.o \
	      txgbe_bp.o \
	      txgbe_dcb_nl.o \
	      txgbe_dcb.o \
	      txgbe_debugfs.o \
	      txgbe_fcoe.o \
	      txgbe_mbx.o \
	      txgbe_mtd.o \
	      txgbe_param.o \
	      txgbe_ptp.o \
	      txgbe_procfs.o \
	      txgbe_sriov.o \
	      txgbe_sysfs.o \
	      txgbe_xsk.o \
	      txgbe_lib.o \
	      txgbe_pcierr.o \
	      txgbe_kcompat.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,553 @@
#include "txgbe_bp.h"
/**
 * txgbe_bp_close_protect - block until any in-flight KR re-init finishes
 * @adapter: board private structure
 *
 * Sets the KR "protect down" flag so no new re-init can start, then waits
 * (100 ms granularity) for an in-progress re-init to clear its flag.
 *
 * Fix vs. original: the progress message used a raw printk() without a
 * KERN_ level; it now goes through e_dev_info() like the rest of this file.
 *
 * NOTE(review): the wait is unbounded - if the REINIT flag is never
 * cleared this loops forever.  Verify against the flag writers before
 * adding a timeout.
 */
void txgbe_bp_close_protect(struct txgbe_adapter *adapter)
{
	adapter->flags2 |= TXGBE_FLAG2_KR_PRO_DOWN;
	while (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) {
		msleep(100);
		e_dev_info("wait to reinited ok..%x\n", adapter->flags2);
	}
}
/* txgbe_bp_mode_setting - derive backplane link/AN settings from module
 * parameters and record them in the adapter and hw structures.
 * Always returns 0.
 */
int txgbe_bp_mode_setting(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;

	/* default to open an73 (and an37) according to the AUTO parameter */
	adapter->backplane_an = AUTO ? 1 : 0;
	adapter->an37 = AUTO ? 1 : 0;

	/* map the requested backplane mode onto a subsystem device id */
	switch (adapter->backplane_mode) {
	case TXGBE_BP_M_KR:
		hw->subsystem_device_id = TXGBE_ID_WX1820_KR_KX_KX4;
		break;
	case TXGBE_BP_M_KX4:
		hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_XAUI;
		break;
	case TXGBE_BP_M_KX:
		hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_SGMII;
		break;
	case TXGBE_BP_M_SFI:
		hw->subsystem_device_id = TXGBE_ID_WX1820_SFP;
		break;
	default:
		break;
	}

	/* explicit auto-negotiation override via the backplane_auto param */
	if (adapter->backplane_auto == TXGBE_BP_M_AUTO) {
		adapter->backplane_an = 1;
		adapter->an37 = 1;
	} else if (adapter->backplane_auto == TXGBE_BP_M_NAUTO) {
		adapter->backplane_an = 0;
		adapter->an37 = 0;
	}

	/* keep default FFE coefficients unless explicitly overridden */
	if (adapter->ffe_set == 0)
		return 0;

	/* first matching parameter set wins: KR > KX4 > KX > SFI */
	if (KR_SET == 1) {
		adapter->ffe_main = KR_MAIN;
		adapter->ffe_pre = KR_PRE;
		adapter->ffe_post = KR_POST;
	} else if (!KR_SET && KX4_SET == 1) {
		adapter->ffe_main = KX4_MAIN;
		adapter->ffe_pre = KX4_PRE;
		adapter->ffe_post = KX4_POST;
	} else if (!KR_SET && !KX4_SET && KX_SET == 1) {
		adapter->ffe_main = KX_MAIN;
		adapter->ffe_pre = KX_PRE;
		adapter->ffe_post = KX_POST;
	} else if (!KR_SET && !KX4_SET && !KX_SET && SFI_SET == 1) {
		adapter->ffe_main = SFI_MAIN;
		adapter->ffe_pre = SFI_PRE;
		adapter->ffe_post = SFI_POST;
	}

	return 0;
}
/* txgbe_bp_watchdog_event - kick AN73/CL72 training from the watchdog.
 *
 * Only acts while the link is down: either polls the VR AN MMD interrupt
 * status register (KR_POLLING mode) or reacts to the on-demand
 * TXGBE_FLAG2_KR_TRAINING flag.
 */
void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 status = 0;

	/* only continue if link is down */
	if (netif_carrier_ok(netdev))
		return;

	if (KR_POLLING == 1) {
		/* bit2 of the VR AN MMD interrupt register (0x78002) pending */
		status = txgbe_rd32_epcs(hw, 0x78002) & 0x4;
		if (status == 0x4) {
			e_dev_info("Enter training\n");
			handle_bkp_an73_flow(0, adapter);
		}
	} else if (adapter->flags2 & TXGBE_FLAG2_KR_TRAINING) {
		e_dev_info("Enter training\n");
		handle_bkp_an73_flow(0, adapter);
		adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING;
	}
}
/* txgbe_bp_down_event - link-down handling for backplane AN73.
 *
 * Restarts auto-negotiation when the link drops, using one of three
 * strategies selected by the KR_RESTART_T_MODE module parameter:
 *   1: fully stop AN73, wait 1 s, then force the link to KR
 *   2: stop AN73, wait, then re-arm AN73 from scratch
 *   default: poll the AN interrupt status and either request KR training
 *            or restart AN73
 */
void txgbe_bp_down_event(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	u32 val = 0, val1 = 0;

	/* nothing to do unless backplane AN73 is enabled */
	if (adapter->backplane_an == 0)
		return;

	switch (KR_RESTART_T_MODE) {
	case 1:
		/* Disable AN73 (0x78001 presumably masks the AN interrupts -
		 * TODO confirm), then force the link to KR.
		 */
		txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0000);
		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000);
		txgbe_wr32_epcs(hw, 0x78001, 0x0000);
		msleep(1000);
		txgbe_set_link_to_kr(hw, 1);
		break;
	case 2:
		/* Disable AN73, wait, then re-enable and restart it.
		 * (0x3200 presumably sets the AN enable/restart bits in
		 * SR_AN_MMD_CTL - TODO confirm exact bit meanings.)
		 */
		txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0000);
		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000);
		txgbe_wr32_epcs(hw, 0x78001, 0x0000);
		msleep(1050);
		txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0001);
		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3200);
		txgbe_wr32_epcs(hw, 0x78001, 0x0007);
		break;
	default:
		if (AN73_TRAINNING_MODE == 1)
			msleep(100);
		else
			msleep(1000);
		/* VR AN MMD interrupt status and SR AN control snapshot */
		val = txgbe_rd32_epcs(hw, 0x78002);
		val1 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
		if ((val & BIT(2)) == BIT(2)) {
			/* AN interrupt pending: request a training pass */
			if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING))
				adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING;
		} else {
			/* otherwise clear interrupt state and restart AN */
			txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0);
			txgbe_wr32_epcs(hw, 0x78002, 0x0000);
			txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000);
		}
		kr_dbg(KR_MODE, "0x78002 : %x - 0x70000 : %x\n", val, val1);
		kr_dbg(KR_MODE, "0x70012 : %x\n", txgbe_rd32_epcs(hw, 0x70012));
		break;
	}
}
/* Check Ethernet Backplane AN73 Base Page Ability.
 *
 * Return value:
 *  -1 : no common link mode with the partner, exit
 *   0 : current link mode matches, wait for AN73 to complete
 *   1 : current link mode does not match; the caller should switch to the
 *       matched link mode and restart AN73 externally
 *
 * Fix vs. original: removed a large "#if 0" dead-code block (it even
 * referenced an undeclared "hw" variable and could never compile if
 * enabled).  Live behavior is unchanged.
 */
int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability,
			 struct txgbe_adapter *adapter)
{
	unsigned int comLinkAbility;

	kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n");
	kr_dbg(KR_MODE, "------------------------\n");

	/*-- Check the common link ability and take action based on the result*/
	comLinkAbility = tBkpAn73Ability.linkAbility & tLpBkpAn73Ability.linkAbility;
	kr_dbg(KR_MODE, "comLinkAbility= 0x%x, linkAbility= 0x%x, lpLinkAbility= 0x%x\n",
	       comLinkAbility, tBkpAn73Ability.linkAbility, tLpBkpAn73Ability.linkAbility);

	/* only KR (ability bit 0x80) is supported here */
	if (comLinkAbility == 0) {
		kr_dbg(KR_MODE, "WARNING: The Link Partner does not support any compatible speed mode!!!\n\n");
		return -1;
	} else if (comLinkAbility & 0x80) {
		if (tBkpAn73Ability.currentLinkMode == 0) {
			kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KR].\n");
			return 0;
		}
		kr_dbg(KR_MODE, "Link mode is not matched with Link Partner: [LINK_KR].\n");
		kr_dbg(KR_MODE, "Set the local link mode to [LINK_KR] ...\n");
		return 1;
	}

	return 0;
}
/* Get Ethernet Backplane AN73 Base Page Ability.
 * @pt_bkp_an73_ability: output struct filled with the decoded ability
 * @byLinkPartner:
 *  - 1: Get Link Partner Base Page
 *  - 2: Get Link Partner Next Page (only NXP Ability Register 1 at the moment)
 *  - 0: Get Local Device Base Page
 * @adapter: board private structure
 *
 * Always returns 0.
 *
 * Fix vs. original: the byLinkPartner == 2 branch used a raw
 * "if (KR_MODE) e_dev_info(...)" while every other debug print in this
 * file goes through kr_dbg(); it now uses kr_dbg() for consistency
 * (identical macro expansion, no behavior change).
 */
int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLinkPartner,
			 struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned int rdata;
	int status = 0;

	kr_dbg(KR_MODE, "byLinkPartner = %d\n", byLinkPartner);
	kr_dbg(KR_MODE, "----------------------------------------\n");

	if (byLinkPartner == 1) { /* Link Partner Base Page */
		/* Read the link partner AN73 Base Page Ability Registers */
		kr_dbg(KR_MODE, "Read the link partner AN73 Base Page Ability Registers...\n");
		rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1);
		kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 1: 0x%x\n", rdata);
		pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01;
		kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage);

		rdata = txgbe_rd32_epcs(hw, 0x70014);
		kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata);
		pt_bkp_an73_ability->linkAbility = rdata & 0xE0;
		kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n",
		       pt_bkp_an73_ability->linkAbility);
		kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n");
		kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n");

		rdata = txgbe_rd32_epcs(hw, 0x70015);
		kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 3: 0x%x\n", rdata);
		kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01));
		kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01));
		pt_bkp_an73_ability->fecAbility = (rdata >> 14) & 0x03;
	} else if (byLinkPartner == 2) { /* Link Partner Next Page */
		/* Read the link partner AN73 Next Page Ability Registers */
		kr_dbg(KR_MODE, "\nRead the link partner AN73 Next Page Ability Registers...\n");
		rdata = txgbe_rd32_epcs(hw, 0x70019);
		kr_dbg(KR_MODE, " SR AN MMD LP XNP Ability Register 1: 0x%x\n", rdata);
		pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01;
		kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage);
	} else {
		/* Read the local AN73 Base Page Ability Registers */
		kr_dbg(KR_MODE, "\nRead the local AN73 Base Page Ability Registers...\n");
		rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1);
		kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 1: 0x%x\n", rdata);
		pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01;
		kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage);

		rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2);
		kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 2: 0x%x\n", rdata);
		pt_bkp_an73_ability->linkAbility = rdata & 0xE0;
		kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n",
		       pt_bkp_an73_ability->linkAbility);
		kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n");
		kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n");

		rdata = txgbe_rd32_epcs(hw, 0x70012);
		kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 3: 0x%x\n", rdata);
		kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01));
		kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01));
		pt_bkp_an73_ability->fecAbility = (rdata >> 14) & 0x03;
	} /* if (byLinkPartner == 1) Link Partner Base Page */

	return status;
}
/* set_fields() - overwrite bits [bitHigh:bitLow] of *src_data with setValue.
 * @src_data: word to modify in place
 * @bitHigh:  high bit position of the field
 * @bitLow:   low bit position of the field
 * @setValue: new field value (caller must ensure it fits in the field)
 *
 * Fix vs. original: shifts now use an unsigned constant (1U) so that
 * touching bit 31 is well-defined; "1 << 31" on a signed int is
 * undefined behaviour in C.
 */
static void set_fields(
	unsigned int *src_data,
	unsigned int bitHigh,
	unsigned int bitLow,
	unsigned int setValue)
{
	unsigned int i;

	if (bitHigh == bitLow) {
		/* single-bit field: set or clear exactly that bit */
		if (setValue == 0)
			*src_data &= ~(1U << bitLow);
		else
			*src_data |= (1U << bitLow);
	} else {
		/* clear the whole field, then OR in the new value */
		for (i = bitLow; i <= bitHigh; i++)
			*src_data &= ~(1U << i);
		*src_data |= (setValue << bitLow);
	}
}
/* Clear Ethernet Backplane AN73 interrupt status bits.
 * - intIndexHi == 0: only bit intIndex is cleared
 * - intIndexHi != 0: the whole [intIndexHi:intIndex] range is cleared
 * Always returns 0.
 */
int clr_bkp_an73_int(unsigned int intIndex, unsigned int intIndexHi, struct txgbe_adapter * adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned int hi = intIndexHi ? intIndexHi : intIndex;
	unsigned int rdata, wdata;

	rdata = txgbe_rd32_epcs(hw, 0x78002);
	kr_dbg(KR_MODE, "[Before clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata);

	wdata = rdata;
	set_fields(&wdata, hi, intIndex, 0);
	txgbe_wr32_epcs(hw, 0x78002, wdata);

	rdata = txgbe_rd32_epcs(hw, 0x78002);
	kr_dbg(KR_MODE, "[After clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata);

	return 0;
}
/* Dump the TX equalizer cursor settings (main/pre/post) of one PHY lane. */
void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned int reg;

	/* LANEN_DIG_ASIC_TX_ASIC_IN_1[11:6]: TX_MAIN_CURSOR */
	reg = rd32_ephy(hw, 0x100E | (lane << 8));
	kr_dbg(KR_MODE, "PHY LANE%0d TX EQ Read Value:\n", lane);
	kr_dbg(KR_MODE, " TX_MAIN_CURSOR: %d\n", ((reg >> 6) & 0x3F));

	/* LANEN_DIG_ASIC_TX_ASIC_IN_2[5:0]:  TX_PRE_CURSOR
	 * LANEN_DIG_ASIC_TX_ASIC_IN_2[11:6]: TX_POST_CURSOR
	 */
	reg = rd32_ephy(hw, 0x100F | (lane << 8));
	kr_dbg(KR_MODE, " TX_PRE_CURSOR : %d\n", (reg & 0x3F));
	kr_dbg(KR_MODE, " TX_POST_CURSOR: %d\n", ((reg >> 6) & 0x3F));
	kr_dbg(KR_MODE, "**********************************************\n");
}
/* Enable/disable Clause 72 KR training.
 *
 * Note:
 * <1>. The Clause 72 start-up protocol should be initiated when all pages
 * are exchanged during Clause 73 auto-negotiation and when the
 * auto-negotiation process is waiting for link status to be UP for 500 ms
 * after exchanging all the pages.
 *
 * <2>. The local device and link partner should enable the CL72 KR
 * training within 500 ms.
 *
 * enable:
 * - bits[1:0] = 2'b11 (3): enable the CL72 KR training
 * - bits[1:0] = 2'b01 (1): disable the CL72 KR training
 */
int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned int wdata = 0;
	u32 val;

	if (enable == 1) {
		kr_dbg(KR_MODE, "\nDisable Clause 72 KR Training ...\n");
		read_phy_lane_txeq(0, adapter);
	} else if (enable == 3) {
		kr_dbg(KR_MODE, "\nEnable Clause 72 KR Training ...\n");
		if (CL72_KRTR_PRBS_MODE_EN != 0xffff) {
			/* Program the PRBS mode control value (0x18005 -
			 * presumably VR_PMA_KRTR_PRBS_CTRL1; TODO confirm)
			 */
			wdata = CL72_KRTR_PRBS_MODE_EN;
			txgbe_wr32_epcs(hw, 0x18005, wdata);
			/* Set PRBS timer duration control to maximum (0x18004) */
			wdata = 0xFFFF;
			txgbe_wr32_epcs(hw, 0x18004, wdata);
			/* Enable PRBS mode to determine KR training status by
			 * setting bit 0 of VR_PMA_KRTR_PRBS_CTRL0
			 */
			wdata = 0;
			set_fields(&wdata, 0, 0, 1);
		}
		/* Enable PRBS31 as the KR training pattern by setting bit 1
		 * of VR_PMA_KRTR_PRBS_CTRL0
		 */
		if (CL72_KRTR_PRBS31_EN == 1)
			set_fields(&wdata, 1, 1, 1);
		val = txgbe_rd32_epcs(hw, 0x18003);
		wdata |= val;
		txgbe_wr32_epcs(hw, 0x18003, wdata);
		read_phy_lane_txeq(0, adapter);
	}

	/* Enable the Clause 72 start-up protocol by setting bit 1 of
	 * SR_PMA_KR_PMD_CTRL (0x10096); restart it by setting bit 0.
	 */
	wdata = enable;
	txgbe_wr32_epcs(hw, 0x10096, wdata);
	return 0;
}
/* Poll the SR PMA MMD 10GBASE-KR status register (0x10097) until training
 * either fails (bit 3) or the receiver is trained and ready (bit 0), then
 * dump the coefficient registers.  Returns the read_poll_timeout() result
 * (0 on success, -ETIMEDOUT on timeout).
 */
int chk_cl72_krtr_status(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned int rdata = 0, rdata1;
	int status = 0;

	/* wait up to 400 ms for failure (bit3) or receiver ready (bit0) */
	status = read_poll_timeout(txgbe_rd32_epcs, rdata1, (rdata1 & 0x9), 1000,
				   400000, false, hw, 0x10097);
	if (!status) {
		/* Get the latest received coefficient update or status */
		rdata = txgbe_rd32_epcs(hw, 0x010098);
		kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n",
		       rdata);
		rdata = txgbe_rd32_epcs(hw, 0x010099);
		kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n",
		       rdata);
		rdata = txgbe_rd32_epcs(hw, 0x01009a);
		kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LD Coefficient Update: 0x%x\n", rdata);
		rdata = txgbe_rd32_epcs(hw, 0x01009b);
		kr_dbg(KR_MODE, " SR PMA MMD 10GBASE-KR LD Coefficient Status: 0x%x\n", rdata);
		rdata = txgbe_rd32_epcs(hw, 0x010097);
		kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR Status Register: 0x%x\n", rdata);
		kr_dbg(KR_MODE, " Training Failure (bit3): %d\n", ((rdata >> 3) & 0x01));
		kr_dbg(KR_MODE, " Start-Up Protocol Status (bit2): %d\n", ((rdata >> 2) & 0x01));
		kr_dbg(KR_MODE, " Frame Lock (bit1): %d\n", ((rdata >> 1) & 0x01));
		kr_dbg(KR_MODE, " Receiver Status (bit0): %d\n", ((rdata >> 0) & 0x01));

		/* If bit3 is set, training is completed with failure */
		if ((rdata1 >> 3) & 0x01) {
			kr_dbg(KR_MODE, "Training is completed with failure!!!\n");
			read_phy_lane_txeq(0, adapter);
			return status;
		}

		/* If bit0 is set, receiver trained and ready to receive data */
		if ((rdata1 >> 0) & 0x01) {
			kr_dbg(KR_MODE, "Receiver trained and ready to receive data ^_^\n");
			e_dev_info("Receiver ready.\n");
			read_phy_lane_txeq(0, adapter);
			return status;
		}
	}

	kr_dbg(KR_MODE, "ERROR: Check Clause 72 KR Training Complete Timeout!!!\n");
	return status;
}
/* handle_bkp_an73_flow - run the full backplane AN73 + CL72 training flow.
 * @bp_link_mode: current link mode of the local device (0 == KR)
 * @adapter: board private structure
 *
 * Reads local and link-partner base-page abilities, checks mode/FEC
 * compatibility, then runs Clause 72 KR training up to "round" times,
 * waiting for both local device and link partner coefficient status to
 * report ready.  Returns 0 when both are ready; otherwise the
 * accumulated error status.
 */
int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter)
{
	bkpan73ability tBkpAn73Ability, tLpBkpAn73Ability;
	u32 rdata = 0, rdata1 = 0, round = 1;
	struct txgbe_hw *hw = &adapter->hw;
	bool lpld_all_rd = false;
	unsigned int addr, data;
	int status = 0, k;

	tBkpAn73Ability.currentLinkMode = bp_link_mode;

	if (AN73_TRAINNING_MODE == 1) {
		/* training mode 1: two attempts, AN disabled up front */
		round = 2;
		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0);
	}
	kr_dbg(KR_MODE, "HandleBkpAn73Flow().\n");
	kr_dbg(KR_MODE, "---------------------------------\n");

	/*1. Get the local AN73 Base Page Ability*/
	kr_dbg(KR_MODE, "<1>. Get the local AN73 Base Page Ability ...\n");
	get_bkp_an73_ability(&tBkpAn73Ability, 0, adapter);
	/*2. Check the AN73 Interrupt Status*/
	kr_dbg(KR_MODE, "<2>. Check the AN73 Interrupt Status ...\n");
	/*3.Clear the AN_PG_RCV interrupt*/
	clr_bkp_an73_int(2, 0x0, adapter);

	/*3.1. Get the link partner AN73 Base Page Ability*/
	kr_dbg(KR_MODE, "<3.1>. Get the link partner AN73 Base Page Ability ...\n");
	get_bkp_an73_ability(&tLpBkpAn73Ability, 1, adapter);

	/*3.2. Check the AN73 Link Ability with Link Partner*/
	kr_dbg(KR_MODE, "<3.2>. Check the AN73 Link Ability with Link Partner ...\n");
	kr_dbg(KR_MODE, " Local Link Ability: 0x%x\n", tBkpAn73Ability.linkAbility);
	kr_dbg(KR_MODE, " Link Partner Link Ability: 0x%x\n", tLpBkpAn73Ability.linkAbility);
	chk_bkp_an73_ability(tBkpAn73Ability, tLpBkpAn73Ability, adapter);

	/*Check the FEC and KR Training for KR mode*/
	/* FEC handling */
	kr_dbg(KR_MODE, "<3.3>. Check the FEC for KR mode ...\n");
	/* NOTE(review): both FEC abilities are forced to 0x3 here, so FEC
	 * is enabled whenever KR_FEC is set regardless of what was actually
	 * advertised - confirm this override is intentional.
	 */
	tBkpAn73Ability.fecAbility = 0x3;
	tLpBkpAn73Ability.fecAbility = 0x3;
	if (((tBkpAn73Ability.fecAbility & tLpBkpAn73Ability.fecAbility) == 0x03)
	    && (KR_FEC == 1)) {
		e_dev_info("Enable KR FEC ...\n");
		/* Write 1 to SR_PMA_KR_FEC_CTRL bit0 to enable the FEC */
		data = 1;
		addr = 0x100ab; /* SR_PMA_KR_FEC_CTRL */
		txgbe_wr32_epcs(hw, addr, data);
	} else {
		e_dev_info("KR FEC is disabled.\n");
	}

	kr_dbg(KR_MODE, "\n<3.4>. Check the CL72 KR Training for KR mode ...\n");
	for (k = 0; k < round; k++) {
		status |= en_cl72_krtr(3, adapter);
		kr_dbg(KR_MODE, "\nCheck the Clause 72 KR Training status ...\n");
		status |= chk_cl72_krtr_status(adapter);

		/* wait for LP coefficient status "ready" (bit15 of 0x10099) */
		status = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x8000), 1000,
					   200000, false, hw, 0x10099);
		if (!status) {
			/* LD coefficient status bit15: local device ready too */
			rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000;
			if (rdata1 == 0x8000)
				lpld_all_rd = true;
		}
		if (lpld_all_rd) {
			/* report the final FFE (main/pre/post) settings */
			rdata = rd32_ephy(hw, 0x100E);
			rdata1 = rd32_ephy(hw, 0x100F);
			e_dev_info("Lp and Ld all Ready, FFE : %d-%d-%d.\n",
				   (rdata >> 6) & 0x3F, rdata1 & 0x3F, (rdata1 >> 6) & 0x3F);
			clr_bkp_an73_int(2, 0, adapter);
			clr_bkp_an73_int(1, 0, adapter);
			clr_bkp_an73_int(0, 0, adapter);
			/* wait for AN complete (bit12 of 0x30020) */
			status = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x1000), 1000,
						   100000, false, hw, 0x30020);
			if (!status)
				e_dev_info("INT_AN_INT_CMPLT =1, AN73 Done Success.\n");
			return 0;
		}
		clr_bkp_an73_int(2, 0, adapter);
		clr_bkp_an73_int(1, 0, adapter);
		clr_bkp_an73_int(0, 0, adapter);
	}

	e_dev_info("Trainning failure\n");
	if (AN73_TRAINNING_MODE == 0)
		status |= en_cl72_krtr(1, adapter);
	return status;
}

View File

@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#ifndef _TXGBE_BP_H_
#define _TXGBE_BP_H_

#include "txgbe.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"

/* Backplane AN73 Base Page Ability struct */
typedef struct TBKPAN73ABILITY {
	unsigned int nextPage;        /* Next Page (bit0) */
	unsigned int linkAbility;     /* Link Ability (bit[7:0]) */
	unsigned int fecAbility;      /* FEC Request (bit1), FEC Enable (bit0) */
	unsigned int currentLinkMode; /* current link mode for local device */
} bkpan73ability;

/* Fallback for kernels that do not provide read_poll_timeout():
 * repeatedly evaluates op(args) into val until cond becomes true or
 * timeout_us elapses; evaluates to 0 on success, -ETIMEDOUT on timeout.
 */
#ifndef read_poll_timeout
#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
			  sleep_before_read, args...) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	if (sleep_before_read && __sleep_us) \
		usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	for (;;) { \
		(val) = op(args); \
		if (cond) \
			break; \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = op(args); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
		cpu_relax(); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
#endif

/* Print a debug message only when KR debug mode is enabled. */
#define kr_dbg(KR_MODE, fmt, arg...) \
	do { \
		if (KR_MODE) \
			e_dev_info(fmt, ##arg); \
	} while (0)

/* Backplane AN73/CL72 API implemented in txgbe_bp.c */
void txgbe_bp_down_event(struct txgbe_adapter *adapter);
void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter);
int txgbe_bp_mode_setting(struct txgbe_adapter *adapter);
void txgbe_bp_close_protect(struct txgbe_adapter *adapter);
int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter);
int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLinkPartner,
			 struct txgbe_adapter *adapter);
int clr_bkp_an73_int(unsigned int intIndex, unsigned int intIndexHi,
		     struct txgbe_adapter *adapter);
int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability,
			 struct txgbe_adapter *adapter);

#endif /* _TXGBE_BP_H_ */

View File

@ -0,0 +1,653 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_dcb.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe_type.h"
#include "txgbe_dcb.h"
#include "txgbe.h"
/*
 * txgbe_dcb_calculate_tc_credits - compute ieee traffic class credits
 * from the configured bandwidth percentages.  Credits are the smallest
 * unit programmable into the underlying hardware.  The IEEE 802.1Qaz
 * specification does not use bandwidth groups, so this is much simpler
 * than the CEE case.  Always returns 0.
 */
s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
				   int max_frame_size)
{
	int smallest_bw = 100;
	int credit_floor, mult;
	int tc;

	/* minimum credits to cover half a max-size frame, rounded up
	 * to whole credit quanta
	 */
	credit_floor = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
		       TXGBE_DCB_CREDIT_QUANTUM;

	/* smallest non-zero bandwidth percentage across all TCs */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		if (bw[tc] && bw[tc] < smallest_bw)
			smallest_bw = bw[tc];
	}

	mult = (credit_floor / smallest_bw) + 1;

	/* program per-TC refill and max credits, clamped to HW limits */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		int credits = min(bw[tc] * mult, TXGBE_DCB_MAX_CREDIT_REFILL);

		if (credits < credit_floor)
			credits = credit_floor;
		refill[tc] = (u16)credits;

		max[tc] = (u16)(bw[tc] ? (bw[tc] * TXGBE_DCB_MAX_CREDIT) / 100 :
				credit_floor);
	}

	return 0;
}
/**
 * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
 * @hw: pointer to hardware structure (unused)
 * @dcb_config: Struct containing DCB settings
 * @max_frame_size: maximum frame size in bytes
 * @direction: Configuring either Tx or Rx
 *
 * This function calculates the credits allocated to each traffic class.
 * It should be called only after the rules are checked by
 * txgbe_dcb_check_config_cee().
 *
 * Returns 0 on success, TXGBE_ERR_CONFIG when @dcb_config is NULL.
 */
s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
				       struct txgbe_dcb_config *dcb_config,
				       u32 max_frame_size, u8 direction)
{
	struct txgbe_dcb_tc_path *p;
	u32 min_multiplier = 0;
	u16 min_percent = 100;
	s32 ret_val = 0;
	/* Initialization values default for Tx settings */
	u32 min_credit = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u16 link_percentage = 0;
	u8 bw_percent = 0;
	u8 i;

	UNREFERENCED_PARAMETER(hw);

	if (dcb_config == NULL) {
		ret_val = TXGBE_ERR_CONFIG;
		goto out;
	}

	/* credits needed for half a maximum-size frame, rounded up */
	min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
		     TXGBE_DCB_CREDIT_QUANTUM;

	/* Find smallest link percentage */
	for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[direction];
		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
		link_percentage = p->bwg_percent;
		link_percentage = (link_percentage * bw_percent) / 100;
		if (link_percentage && link_percentage < min_percent)
			min_percent = link_percentage;
	}

	/*
	 * The ratio between traffic classes will control the bandwidth
	 * percentages seen on the wire. To calculate this ratio we use
	 * a multiplier. It is required that the refill credits must be
	 * larger than the max frame size so here we find the smallest
	 * multiplier that will allow all bandwidth percentages to be
	 * greater than the max frame size.
	 */
	min_multiplier = (min_credit / min_percent) + 1;

	/* Find out the link percentage for each TC first */
	for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[direction];
		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];

		link_percentage = p->bwg_percent;
		/* Must be careful of integer division for very small nums */
		link_percentage = (link_percentage * bw_percent) / 100;
		if (p->bwg_percent > 0 && link_percentage == 0)
			link_percentage = 1;

		/* Save link_percentage for reference */
		p->link_percent = (u8)link_percentage;

		/* Calculate credit refill ratio using multiplier */
		credit_refill = min(link_percentage * min_multiplier,
				    (u32)TXGBE_DCB_MAX_CREDIT_REFILL);

		/* Refill at least minimum credit */
		if (credit_refill < min_credit)
			credit_refill = min_credit;

		p->data_credits_refill = (u16)credit_refill;

		/* Calculate maximum credit for the TC */
		credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100;

		/*
		 * Adjustment based on rule checking, if the percentage
		 * of a TC is too small, the maximum credit may not be
		 * enough to send out a jumbo frame in data plane arbitration.
		 */
		if (credit_max < min_credit)
			credit_max = min_credit;

		if (direction == TXGBE_DCB_TX_CONFIG) {
			/*
			 * Adjustment based on rule checking, if the
			 * percentage of a TC is too small, the maximum
			 * credit may not be enough to send out a TSO
			 * packet in descriptor plane arbitration.
			 */
			dcb_config->tc_config[i].desc_credits_max =
				(u16)credit_max;
		}

		p->data_credits_max = (u16)credit_max;
	}

out:
	return ret_val;
}
/**
 * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
 * @cfg: dcb configuration to unpack into hardware consumable fields
 * @map: user priority to traffic class map
 * @pfc_up: u8 to store user priority PFC bitmask
 *
 * Builds an 8-bit user-priority bitmask in which a bit is set when the
 * traffic class that priority maps to has PFC enabled.  The priority to
 * tc map must be updated before calling this routine.
 */
void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
{
	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
	u8 mask = 0;
	int up;

	for (up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++) {
		if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled)
			mask |= 1 << up;
	}
	*pfc_up = mask;
}
/* Copy the per-TC refill credits for @direction out of @cfg into @refill. */
void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
				 u16 *refill)
{
	int tc;

	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
		refill[tc] = cfg->tc_config[tc].path[direction].data_credits_refill;
}
/* Copy the per-TC descriptor max credits out of @cfg into @max. */
void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max)
{
	int tc;

	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
		max[tc] = cfg->tc_config[tc].desc_credits_max;
}
/* Copy the per-TC bandwidth group ids for @direction into @bwgid. */
void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
				u8 *bwgid)
{
	int tc;

	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
		bwgid[tc] = cfg->tc_config[tc].path[direction].bwg_id;
}
/* Copy the per-TC transmission selection algorithms for @direction into @tsa. */
void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
			      u8 *tsa)
{
	int tc;

	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
		tsa[tc] = cfg->tc_config[tc].path[direction].tsa;
}
/* Map user priority @up to its traffic class for @direction.
 * Returns 0 when no TC claims the priority or when DCB is likely not
 * enabled/supported (pg_tcs == 0).
 */
u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up)
{
	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
	u8 prio_mask = 1 << up;
	u8 tc = cfg->num_tcs.pg_tcs;

	/* If tc is 0 then DCB is likely not enabled or supported */
	if (!tc)
		return 0;

	/* Scan from the highest TC down; the first match wins.  If nothing
	 * matches, the priority is assumed to belong to TC 0, since a TC
	 * must be set for all user priorities.
	 */
	for (tc--; tc; tc--) {
		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
			break;
	}

	return tc;
}
/* Fill @map with the traffic class each user priority belongs to. */
void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
			      u8 *map)
{
	u8 prio;

	for (prio = 0; prio < TXGBE_DCB_MAX_USER_PRIORITY; prio++)
		map[prio] = txgbe_dcb_get_tc_from_up(cfg, direction, prio);
}
/**
 * txgbe_dcb_config_tc_stats - Config traffic class statistics
 * @hw: pointer to hardware structure (unused)
 * @dcb_config: DCB configuration (unused)
 *
 * Stub kept for API symmetry with the queue-statistics configuration of
 * related drivers; performs no register writes here and always returns 0.
 */
s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *hw,
			      struct txgbe_dcb_config *dcb_config)
{
	UNREFERENCED_PARAMETER(hw);
	UNREFERENCED_PARAMETER(dcb_config);

	return 0;
}
/**
 * txgbe_dcb_hw_config_cee - Config and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to txgbe_dcb_config structure
 *
 * Unpacks the CEE standard containers, programs the arbiters and, when
 * PFC mode is enabled, the priority flow control registers.  Returns the
 * result of the arbiter/PFC programming.
 */
s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw,
			    struct txgbe_dcb_config *dcb_config)
{
	s32 ret = TXGBE_NOT_IMPLEMENTED;
	u8 pfc_en;
	u8 tsa[TXGBE_DCB_MAX_TRAFFIC_CLASS];
	u8 bwgid[TXGBE_DCB_MAX_TRAFFIC_CLASS];
	u8 map[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
	u16 refill[TXGBE_DCB_MAX_TRAFFIC_CLASS];
	u16 max[TXGBE_DCB_MAX_TRAFFIC_CLASS];

	/* Unpack CEE standard containers */
	txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
	txgbe_dcb_unpack_max_cee(dcb_config, max);
	txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
	txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);

	/* program the arbiters with the unpacked parameters */
	txgbe_dcb_config(hw, dcb_config);
	ret = txgbe_dcb_hw_config(hw,
				  refill, max, bwgid,
				  tsa, map);
	txgbe_dcb_config_tc_stats(hw, dcb_config);

	/* PFC is configured only after a successful arbiter setup */
	if (!ret && dcb_config->pfc_mode_enable) {
		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
	}

	return ret;
}
/* Helper routines to abstract HW specifics from DCB netlink ops */

/**
 * txgbe_dcb_config_pfc - configure priority flow control per TC
 * @hw: pointer to hardware structure
 * @pfc_en: bitmask of user priorities with PFC enabled
 * @map: user priority to traffic-class map
 *
 * Programs the Rx/Tx PFC enables, per-TC XON/XOFF water marks, pause
 * time and the flow-control refresh threshold.
 *
 * Fix vs. original: for PFC-enabled TCs the XON/low-water value (fcrtl)
 * was written to TXGBE_RDB_RFCH (the high-water register), leaving RFCL
 * unprogrammed and clobbering the high-water setup; it is now written to
 * TXGBE_RDB_RFCL(i), mirroring the enabled/disabled symmetry below.
 */
s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
{
	int ret = TXGBE_ERR_PARAM;
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	wr32(hw, TXGBE_RDB_RFCC, TXGBE_RDB_RFCC_RFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = 0;
	if (pfc_en)
		reg |= (TXGBE_MAC_RX_FLOW_CTRL_PFCE | 0x1);
	wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, reg);

	/* highest traffic class any user priority maps to */
	for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < TXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) |
			      TXGBE_RDB_RFCH_XOFFE;
			fcrtl = (hw->fc.low_water[i] << 10) |
				TXGBE_RDB_RFCL_XONE;
			/* XON (low water) belongs in RFCL, not RFCH */
			wr32(hw, TXGBE_RDB_RFCL(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 *
			 * NOTE(review): the code uses the raw packet-buffer
			 * size without subtracting 24KB - confirm against
			 * the hardware requirement above.
			 */
			reg = rd32(hw, TXGBE_RDB_PB_SZ(i));
			wr32(hw, TXGBE_RDB_RFCL(i), 0);
		}
		wr32(hw, TXGBE_RDB_RFCH(i), reg);
	}

	/* zero the water marks of the remaining traffic classes */
	for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		wr32(hw, TXGBE_RDB_RFCL(i), 0);
		wr32(hw, TXGBE_RDB_RFCH(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		wr32(hw, TXGBE_RDB_RFCV(i), reg);

	/* Configure flow control refresh threshold value */
	wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2);

	/* NOTE(review): ret is never set to a success code, so callers
	 * receive TXGBE_ERR_PARAM even on the normal path - verify whether
	 * this should return 0 (kept as-is to avoid changing caller-visible
	 * behavior).
	 */
	return ret;
}
s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max,
			u8 *bwg_id, u8 *tsa, u8 *map)
{
	/* Program the Rx packet, Tx descriptor and Tx data plane
	 * arbiters from the unpacked CEE credit/priority arrays.
	 */
	txgbe_dcb_config_rx_arbiter(hw, refill, max, bwg_id, tsa, map);
	txgbe_dcb_config_tx_desc_arbiter(hw, refill, max, bwg_id, tsa);
	txgbe_dcb_config_tx_data_arbiter(hw, refill, max, bwg_id, tsa, map);

	return 0;
}
/**
 * txgbe_dcb_config_rx_arbiter - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: per-TC credit refill values
 * @max: per-TC maximum credit limits
 * @bwg_id: per-TC bandwidth group indices
 * @tsa: per-TC transmission selection algorithm
 * @map: user priority to traffic class map
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *hw, u16 *refill,
				u16 *max, u8 *bwg_id, u8 *tsa,
				u8 *map)
{
	u32 reg;
	u8 up, tc;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	wr32(hw, TXGBE_RDM_ARB_CTL,
	     TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC |
	     TXGBE_RDM_ARB_CTL_ARBDIS);

	/*
	 * Map every user priority to its traffic class; each UP
	 * occupies one 4-bit field of the UP2TC register.
	 */
	reg = 0;
	for (up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++)
		reg |= (map[up] << (up * TXGBE_RDB_UP2TC_UP_SHIFT));
	wr32(hw, TXGBE_RDB_UP2TC, reg);

	/* Configure traffic class credits and priority */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		reg = (u32)refill[tc];
		reg |= (u32)max[tc] << TXGBE_RDM_ARB_CFG_MCL_SHIFT;
		reg |= (u32)bwg_id[tc] << TXGBE_RDM_ARB_CFG_BWG_SHIFT;
		if (tsa[tc] == txgbe_dcb_tsa_strict)
			reg |= TXGBE_RDM_ARB_CFG_LSP;
		wr32(hw, TXGBE_RDM_ARB_CFG(tc), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	wr32(hw, TXGBE_RDM_ARB_CTL,
	     TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC);

	return 0;
}
/**
 * txgbe_dcb_config_tx_desc_arbiter - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: per-TC credit refill values
 * @max: per-TC maximum credit limits
 * @bwg_id: per-TC bandwidth group indices
 * @tsa: per-TC transmission selection algorithm
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *hw, u16 *refill,
				     u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg;
	u8 tc;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (tc = 0; tc < 128; tc++)
		wr32(hw, TXGBE_TDM_VM_CREDIT(tc), 0);

	/* Configure traffic class credits and priority */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		reg = (u32)refill[tc];
		reg |= (u32)max[tc] << TXGBE_TDM_PBWARB_CFG_MCL_SHIFT;
		reg |= (u32)bwg_id[tc] << TXGBE_TDM_PBWARB_CFG_BWG_SHIFT;
		if (tsa[tc] == txgbe_dcb_tsa_group_strict_cee)
			reg |= TXGBE_TDM_PBWARB_CFG_GSP;
		if (tsa[tc] == txgbe_dcb_tsa_strict)
			reg |= TXGBE_TDM_PBWARB_CFG_LSP;
		wr32(hw, TXGBE_TDM_PBWARB_CFG(tc), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	wr32(hw, TXGBE_TDM_PBWARB_CTL,
	     TXGBE_TDM_PBWARB_CTL_TDPAC | TXGBE_TDM_PBWARB_CTL_TDRM);

	return 0;
}
/**
 * txgbe_dcb_config_tx_data_arbiter - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: per-TC credit refill values
 * @max: per-TC maximum credit limits
 * @bwg_id: per-TC bandwidth group indices
 * @tsa: per-TC transmission selection algorithm
 * @map: user priority to traffic class map
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *hw, u16 *refill,
				     u16 *max, u8 *bwg_id, u8 *tsa,
				     u8 *map)
{
	u32 reg;
	u8 up, tc;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	wr32(hw, TXGBE_TDB_PBRARB_CTL,
	     TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM |
	     TXGBE_RTTPCS_ARBDIS);

	/*
	 * Map every user priority to its traffic class; each UP
	 * occupies one 4-bit field of the Tx UP2TC register.
	 */
	reg = 0;
	for (up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++)
		reg |= (map[up] << (up * TXGBE_TDB_UP2TC_UP_SHIFT));
	wr32(hw, TXGBE_TDB_UP2TC, reg);

	/* Configure traffic class credits and priority */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		reg = (u32)refill[tc];
		reg |= (u32)max[tc] << TXGBE_TDB_PBRARB_CFG_MCL_SHIFT;
		reg |= (u32)bwg_id[tc] << TXGBE_TDB_PBRARB_CFG_BWG_SHIFT;
		if (tsa[tc] == txgbe_dcb_tsa_group_strict_cee)
			reg |= TXGBE_TDB_PBRARB_CFG_GSP;
		if (tsa[tc] == txgbe_dcb_tsa_strict)
			reg |= TXGBE_TDB_PBRARB_CFG_LSP;
		wr32(hw, TXGBE_TDB_PBRARB_CFG(tc), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	wr32(hw, TXGBE_TDB_PBRARB_CTL,
	     TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM);

	return 0;
}
/**
 * txgbe_dcb_config - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to txgbe_dcb_config structure
 *
 * Program the port control register with the number of traffic classes
 * and (in VMDq mode) virtual functions, and disable per-queue drop.
 */
s32 txgbe_dcb_config(struct txgbe_hw *hw,
		     struct txgbe_dcb_config *dcb_config)
{
	struct txgbe_adapter *adapter = hw->back;
	u32 value, n;

	/* Mirror the requested virtualization mode into the adapter flags */
	if (dcb_config->vt_mode)
		adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED;
	else
		adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED;

	if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) {
		/* With VMDq, the TC count bounds how many VTs fit */
		switch (dcb_config->num_tcs.pg_tcs) {
		case 8:
			value = TXGBE_CFG_PORT_CTL_NUM_TC_8 |
				TXGBE_CFG_PORT_CTL_NUM_VT_16 |
				TXGBE_CFG_PORT_CTL_DCB_EN;
			break;
		case 4:
			value = TXGBE_CFG_PORT_CTL_NUM_TC_4 |
				TXGBE_CFG_PORT_CTL_NUM_VT_32 |
				TXGBE_CFG_PORT_CTL_DCB_EN;
			break;
		default:
			/* No DCB: VT count follows the RSS indices */
			if (adapter->ring_feature[RING_F_RSS].indices == 4)
				value = TXGBE_CFG_PORT_CTL_NUM_VT_32;
			else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */
				value = TXGBE_CFG_PORT_CTL_NUM_VT_64;
			break;
		}
	} else {
		switch (dcb_config->num_tcs.pg_tcs) {
		case 8:
			value = TXGBE_CFG_PORT_CTL_NUM_TC_8 |
				TXGBE_CFG_PORT_CTL_DCB_EN;
			break;
		case 4:
			value = TXGBE_CFG_PORT_CTL_NUM_TC_4 |
				TXGBE_CFG_PORT_CTL_DCB_EN;
			break;
		default:
			value = 0;
			break;
		}
	}

	/* Double VLAN and QinQ handling are always kept enabled */
	value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ;
	wr32m(hw, TXGBE_CFG_PORT_CTL,
	      TXGBE_CFG_PORT_CTL_NUM_TC_MASK |
	      TXGBE_CFG_PORT_CTL_NUM_VT_MASK |
	      TXGBE_CFG_PORT_CTL_DCB_EN |
	      TXGBE_CFG_PORT_CTL_D_VLAN |
	      TXGBE_CFG_PORT_CTL_QINQ,
	      value);

	/* Disable drop for all queues */
	for (n = 0; n < 4; n++)
		wr32(hw, TXGBE_RDM_PF_QDE(n), 0x0);

	return 0;
}

View File

@ -0,0 +1,208 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_dcb.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _TXGBE_DCB_H_
#define _TXGBE_DCB_H_

#include "txgbe_type.h"

/* DCB defines */
/* DCB credit calculation defines */
#define TXGBE_DCB_CREDIT_QUANTUM	64	/* credits counted in 64B units */
#define TXGBE_DCB_MAX_CREDIT_REFILL	200	/* 200 * 64B = 12800B */
#define TXGBE_DCB_MAX_TSO_SIZE		(32 * 1024) /* Max TSO pkt size in DCB*/
#define TXGBE_DCB_MAX_CREDIT		(2 * TXGBE_DCB_MAX_CREDIT_REFILL)

/* 513 for 32KB TSO packet */
#define TXGBE_DCB_MIN_TSO_CREDIT	\
	((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1)

/* DCB configuration defines */
#define TXGBE_DCB_MAX_USER_PRIORITY	8
#define TXGBE_DCB_MAX_BW_GROUP		8
#define TXGBE_DCB_BW_PERCENT		100

/* Direction selectors for the per-path (Tx/Rx) config accessors */
#define TXGBE_DCB_TX_CONFIG		0
#define TXGBE_DCB_RX_CONFIG		1

/* DCB capability defines */
#define TXGBE_DCB_PG_SUPPORT		0x00000001
#define TXGBE_DCB_PFC_SUPPORT		0x00000002
#define TXGBE_DCB_BCN_SUPPORT		0x00000004
#define TXGBE_DCB_UP2TC_SUPPORT		0x00000008
#define TXGBE_DCB_GSP_SUPPORT		0x00000010

/* DCB register definitions */
#define TXGBE_TDM_PBWARB_CTL_TDPAC	0x00000001 /* 0 Round Robin,
					 * 1 WSP - Weighted Strict Priority
					 */
#define TXGBE_TDM_PBWARB_CTL_TDRM	0x00000010 /* Transmit Recycle Mode */
#define TXGBE_TDM_PBWARB_CTL_ARBDIS	0x00000040 /* DCB arbiter disable */

/* Receive UP2TC mapping */
#define TXGBE_RDB_UP2TC_UP_SHIFT	4	/* 4-bit field per user priority */
#define TXGBE_RDB_UP2TC_UP_MASK		7
/* Transmit UP2TC mapping */
#define TXGBE_TDB_UP2TC_UP_SHIFT	4

#define TXGBE_RDM_ARB_CFG_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
#define TXGBE_RDM_ARB_CFG_BWG_SHIFT	9 /* Offset to BWG index */
#define TXGBE_RDM_ARB_CFG_GSP		0x40000000 /* GSP enable bit */
#define TXGBE_RDM_ARB_CFG_LSP		0x80000000 /* LSP enable bit */

/* RTRPCS Bit Masks */
#define TXGBE_RDM_ARB_CTL_RRM		0x00000002 /* Receive Recycle Mode enable */
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
#define TXGBE_RDM_ARB_CTL_RAC		0x00000004
#define TXGBE_RDM_ARB_CTL_ARBDIS	0x00000040 /* Arbitration disable bit */

/* RTTDT2C Bit Masks */
#define TXGBE_TDM_PBWARB_CFG_MCL_SHIFT	12
#define TXGBE_TDM_PBWARB_CFG_BWG_SHIFT	9
#define TXGBE_TDM_PBWARB_CFG_GSP	0x40000000
#define TXGBE_TDM_PBWARB_CFG_LSP	0x80000000

#define TXGBE_TDB_PBRARB_CFG_MCL_SHIFT	12
#define TXGBE_TDB_PBRARB_CFG_BWG_SHIFT	9
#define TXGBE_TDB_PBRARB_CFG_GSP	0x40000000
#define TXGBE_TDB_PBRARB_CFG_LSP	0x80000000

/* RTTPCS Bit Masks */
#define TXGBE_TDB_PBRARB_CTL_TPPAC	0x00000020 /* 0 Round Robin,
					 * 1 SP - Strict Priority
					 */
#define TXGBE_RTTPCS_ARBDIS		0x00000040 /* Arbiter disable */
#define TXGBE_TDB_PBRARB_CTL_TPRM	0x00000100 /* Transmit Recycle Mode enable*/

#define TXGBE_TDM_PB_THRE_DCB		0xA /* THRESH value for DCB mode */
/* Driver-level DCB capability summary */
struct txgbe_dcb_support {
	u32 capabilities; /* DCB capabilities */

	/* Each bit represents a number of TCs configurable in the hw.
	 * If 8 traffic classes can be configured, the value is 0x80. */
	u8 traffic_classes;
	u8 pfc_traffic_classes;
};

/* Transmission selection algorithm for a traffic class */
enum txgbe_dcb_tsa {
	txgbe_dcb_tsa_ets = 0,		/* enhanced transmission selection */
	txgbe_dcb_tsa_group_strict_cee,	/* group strict priority (CEE) */
	txgbe_dcb_tsa_strict		/* link strict priority */
};

/* Traffic class bandwidth allocation per direction */
struct txgbe_dcb_tc_path {
	u8 bwg_id; /* Bandwidth Group (BWG) ID */
	u8 bwg_percent; /* % of BWG's bandwidth */
	u8 link_percent; /* % of link bandwidth */
	u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
	u16 data_credits_refill; /* Credit refill amount in 64B granularity */
	u16 data_credits_max; /* Max credits for a configured packet buffer
			       * in 64B granularity.*/
	enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
};

/* Per-TC priority flow control mode */
enum txgbe_dcb_pfc {
	txgbe_dcb_pfc_disabled = 0,
	txgbe_dcb_pfc_enabled,
	txgbe_dcb_pfc_enabled_txonly,
	txgbe_dcb_pfc_enabled_rxonly
};

/* Traffic class configuration */
struct txgbe_dcb_tc_config {
	struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
	enum txgbe_dcb_pfc pfc; /* Class based flow control setting */
	u16 desc_credits_max; /* For Tx Descriptor arbitration */
	u8 tc; /* Traffic class (TC) */
};

/* Rx packet buffer partitioning strategy */
enum txgbe_dcb_pba {
	/* PBA[0-7] each use 64KB FIFO */
	txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
	/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
	txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
};

/* Number of traffic classes negotiated for PG and PFC */
struct txgbe_dcb_num_tcs {
	u8 pg_tcs;
	u8 pfc_tcs;
};

/* Top-level CEE DCB configuration held by the adapter */
struct txgbe_dcb_config {
	struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_MAX_TRAFFIC_CLASS];
	struct txgbe_dcb_support support;
	struct txgbe_dcb_num_tcs num_tcs;
	u8 bw_percentage[2][TXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
	bool pfc_mode_enable;
	bool round_robin_enable;
	enum txgbe_dcb_pba rx_pba_cfg;
	u32 dcb_cfg_version; /* Not used...OS-specific? */
	u32 link_speed; /* For bandwidth allocation validation purpose */
	bool vt_mode; /* combine VMDq/SR-IOV with DCB when true */
};
/* DCB driver APIs */
/* Parameter names added to all prototypes (C best practice: they document
 * the interface and are checked against the definitions by the compiler).
 */

/* DCB credits calculation */
s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
				   int max_frame);
s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
				       struct txgbe_dcb_config *dcb_config,
				       u32 max_frame, u8 direction);

/* DCB PFC */
s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map);

/* DCB stats */
s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *hw,
			      struct txgbe_dcb_config *dcb_config);

/* DCB config arbiters */
s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *hw, u16 *refill,
				     u16 *max, u8 *bwg_id, u8 *tsa);
s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *hw, u16 *refill,
				     u16 *max, u8 *bwg_id, u8 *tsa,
				     u8 *map);
s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *hw, u16 *refill,
				u16 *max, u8 *bwg_id, u8 *tsa,
				u8 *map);

/* DCB unpack routines */
void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map,
			      u8 *pfc_up);
void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
				 u16 *refill);
void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max);
void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
				u8 *bwgid);
void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
			      u8 *tsa);
void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
			      u8 *map);
u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction,
			    u8 up);

/* DCB initialization */
s32 txgbe_dcb_config(struct txgbe_hw *hw,
		     struct txgbe_dcb_config *dcb_config);
s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max,
			u8 *bwg_id, u8 *tsa, u8 *map);
s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw,
			    struct txgbe_dcb_config *dcb_config);

#endif /* _TXGBE_DCB_H_ */

View File

@ -0,0 +1,869 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_dcb_nl.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#if IS_ENABLED(CONFIG_DCB)
#include <linux/dcbnl.h>
#include "txgbe_dcb.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
#define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
/* Commit the staged (temp_dcb_cfg) CEE settings into the active config
 * (dcb_cfg) and return a bitmask (BIT_PG_TX | BIT_PG_RX | BIT_PFC |
 * BIT_APP_UPCHG) of the setting groups that actually changed.
 */
int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max)
{
	struct txgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; /* staged */
	struct txgbe_dcb_config *dcfg = &adapter->dcb_cfg; /* committed */
	struct txgbe_dcb_tc_config *src = NULL;
	struct txgbe_dcb_tc_config *dst = NULL;
	int i, j;
	int tx = TXGBE_DCB_TX_CONFIG;
	int rx = TXGBE_DCB_RX_CONFIG;
	int changes = 0;

#if IS_ENABLED(CONFIG_FCOE)
	/* FCoE priority changed: its hardware offloads must be redone */
	if (adapter->fcoe.up_set != adapter->fcoe.up)
		changes |= BIT_APP_UPCHG;
#endif /* CONFIG_FCOE */

	/* Per-TC path settings: compare/copy Tx then Rx for each class */
	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
		dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0];

		if (dst->path[tx].tsa != src->path[tx].tsa) {
			dst->path[tx].tsa = src->path[tx].tsa;
			changes |= BIT_PG_TX;
		}

		if (dst->path[tx].bwg_id != src->path[tx].bwg_id) {
			dst->path[tx].bwg_id = src->path[tx].bwg_id;
			changes |= BIT_PG_TX;
		}

		if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) {
			dst->path[tx].bwg_percent = src->path[tx].bwg_percent;
			changes |= BIT_PG_TX;
		}

		if (dst->path[tx].up_to_tc_bitmap !=
		    src->path[tx].up_to_tc_bitmap) {
			dst->path[tx].up_to_tc_bitmap =
				src->path[tx].up_to_tc_bitmap;
			/* UP map changes also affect PFC and the FCoE app */
			changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
		}

		if (dst->path[rx].tsa != src->path[rx].tsa) {
			dst->path[rx].tsa = src->path[rx].tsa;
			changes |= BIT_PG_RX;
		}

		if (dst->path[rx].bwg_id != src->path[rx].bwg_id) {
			dst->path[rx].bwg_id = src->path[rx].bwg_id;
			changes |= BIT_PG_RX;
		}

		if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) {
			dst->path[rx].bwg_percent = src->path[rx].bwg_percent;
			changes |= BIT_PG_RX;
		}

		if (dst->path[rx].up_to_tc_bitmap !=
		    src->path[rx].up_to_tc_bitmap) {
			dst->path[rx].up_to_tc_bitmap =
				src->path[rx].up_to_tc_bitmap;
			changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
		}
	}

	/* Bandwidth-group percentages, both directions */
	for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
		j = i - DCB_PG_ATTR_BW_ID_0;

		if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) {
			dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j];
			changes |= BIT_PG_TX;
		}

		if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) {
			dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j];
			changes |= BIT_PG_RX;
		}
	}

	/* Per-user-priority PFC enables */
	for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
		j = i - DCB_PFC_UP_ATTR_0;
		if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) {
			dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc;
			changes |= BIT_PFC;
		}
	}

	/* Global PFC on/off switch */
	if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
		dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
		changes |= BIT_PFC;
	}

	return changes;
}
static u8 txgbe_dcbnl_get_state(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* 1 when DCB is enabled on this port, 0 otherwise */
	return (adapter->flags & TXGBE_FLAG_DCB_ENABLED) ? 1 : 0;
}
static u8 txgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	u8 enabled = !!(adapter->flags & TXGBE_FLAG_DCB_ENABLED);
	int err;

	/* Only the CEE DCBX variant supports this command */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	/* Nothing to do when the requested state matches the current one */
	if (!!state == enabled)
		return 0;

	err = txgbe_setup_tc(netdev,
			     state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);

	return !!err;
}
static void txgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
					 u8 *perm_addr)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	int idx = 0, j;

	/* Unused trailing bytes read back as 0xff */
	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	/* Primary MAC address first, then the SAN address */
	for (j = 0; j < netdev->addr_len; j++)
		perm_addr[idx++] = adapter->hw.mac.perm_addr[j];
	for (j = 0; j < netdev->addr_len; j++)
		perm_addr[idx++] = adapter->hw.mac.san_addr[j];
}
static void txgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
					 u8 prio, u8 bwg_id, u8 bw_pct,
					 u8 up_map)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_dcb_tc_path *path =
		&adapter->temp_dcb_cfg.tc_config[tc].path[0];

	/* DCB_ATTR_VALUE_UNDEFINED marks attributes the caller left out;
	 * only the supplied ones are staged.
	 */
	if (prio != DCB_ATTR_VALUE_UNDEFINED)
		path->tsa = prio;
	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
		path->bwg_id = bwg_id;
	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
		path->bwg_percent = bw_pct;
	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
		path->up_to_tc_bitmap = up_map;
}
static void txgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
					  u8 bw_pct)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Stage the Tx BWG percentage; applied later by SET_ALL */
	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
static void txgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
					 u8 prio, u8 bwg_id, u8 bw_pct,
					 u8 up_map)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_dcb_tc_path *path =
		&adapter->temp_dcb_cfg.tc_config[tc].path[1];

	/* DCB_ATTR_VALUE_UNDEFINED marks attributes the caller left out;
	 * only the supplied ones are staged.
	 */
	if (prio != DCB_ATTR_VALUE_UNDEFINED)
		path->tsa = prio;
	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
		path->bwg_id = bwg_id;
	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
		path->bwg_percent = bw_pct;
	if (up_map != DCB_ATTR_VALUE_UNDEFINED)
		path->up_to_tc_bitmap = up_map;
}
static void txgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
					  u8 bw_pct)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Stage the Rx BWG percentage; applied later by SET_ALL */
	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
static void txgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
					 u8 *up_map)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_dcb_tc_path *path =
		&adapter->dcb_cfg.tc_config[tc].path[0];

	/* Report the committed (not staged) Tx settings for this TC */
	*prio = path->tsa;
	*bwg_id = path->bwg_id;
	*bw_pct = path->bwg_percent;
	*up_map = path->up_to_tc_bitmap;
}
static void txgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
					  u8 *bw_pct)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Committed Tx bandwidth percentage for this BWG */
	*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
static void txgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
					 u8 *up_map)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_dcb_tc_path *path =
		&adapter->dcb_cfg.tc_config[tc].path[1];

	/* Report the committed (not staged) Rx settings for this TC */
	*prio = path->tsa;
	*bwg_id = path->bwg_id;
	*bw_pct = path->bwg_percent;
	*up_map = path->up_to_tc_bitmap;
}
static void txgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
					  u8 *bw_pct)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Committed Rx bandwidth percentage for this BWG */
	*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
static void txgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	u8 tc = txgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up);

	/* Stage the PFC mode for the TC this user priority maps to */
	adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc;

	/* Any difference from the committed config turns PFC mode on */
	if (adapter->temp_dcb_cfg.tc_config[tc].pfc !=
	    adapter->dcb_cfg.tc_config[tc].pfc)
		adapter->temp_dcb_cfg.pfc_mode_enable = true;
}
static void txgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	u8 tc = txgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up);

	/* Committed PFC mode of the TC this user priority maps to */
	*pfc = adapter->dcb_cfg.tc_config[tc].pfc;
}
/* Tear down and rebuild the interrupt scheme, bouncing the interface if
 * it is running.  Used when a DCB change requires re-partitioning queues.
 */
static void txgbe_dcbnl_devreset(struct net_device *dev)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);

	/* Serialize against any other in-flight reset */
	while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (netif_running(dev))
#ifdef HAVE_NET_DEVICE_OPS
		dev->netdev_ops->ndo_stop(dev);
#else
		dev->stop(dev);
#endif

	txgbe_clear_interrupt_scheme(adapter);
	txgbe_init_interrupt_scheme(adapter);

	if (netif_running(dev))
#ifdef HAVE_NET_DEVICE_OPS
		dev->netdev_ops->ndo_open(dev);
#else
		dev->open(dev);
#endif

	clear_bit(__TXGBE_RESETTING, &adapter->state);
}
/* Apply the staged CEE configuration to the hardware.  Returns one of the
 * DCB_HW_CHG* codes so the dcbnl core knows whether anything changed and
 * whether a reset was required.
 */
static u8 txgbe_dcbnl_set_all(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
	struct txgbe_hw *hw = &adapter->hw;
	int ret = DCB_NO_HW_CHG;
	u8 prio_tc[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 };

	/* Fail command if not in CEE mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return ret;

	/* Commit staged settings; accumulate change bits across calls */
	adapter->dcb_set_bitmap |= txgbe_copy_dcb_cfg(adapter,
						      TXGBE_DCB_MAX_TRAFFIC_CLASS);
	if (!adapter->dcb_set_bitmap)
		return ret;

	txgbe_dcb_unpack_map_cee(dcb_cfg, TXGBE_DCB_TX_CONFIG, prio_tc);

	if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) {
		/* Priority to TC mapping in CEE case default to 1:1 */
		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef HAVE_MQPRIO
		int i;
#endif

#if IS_ENABLED(CONFIG_FCOE)
		/* FCoE traffic may need jumbo-frame-sized credits */
		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
			max_frame = max(max_frame, TXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

		/* Recompute credits in both directions, then program hw */
		txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame,
						   TXGBE_DCB_TX_CONFIG);
		txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame,
						   TXGBE_DCB_RX_CONFIG);
		txgbe_dcb_hw_config_cee(hw, dcb_cfg);

#ifdef HAVE_MQPRIO
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
#endif /* HAVE_MQPRIO */
		ret = DCB_HW_CHG_RST;
	}

	if (adapter->dcb_set_bitmap & BIT_PFC) {
		if (dcb_cfg->pfc_mode_enable) {
			u8 pfc_en;

			txgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en);
			txgbe_dcb_config_pfc(hw, pfc_en, prio_tc);
		} else {
			/* PFC off: fall back to link-level flow control */
			TCALL(hw, mac.ops.fc_enable);
		}

		txgbe_set_rx_drop_en(adapter);

		/* Don't downgrade a reset result to a plain change */
		if (ret != DCB_HW_CHG_RST)
			ret = DCB_HW_CHG;
	}

#if IS_ENABLED(CONFIG_FCOE)
	/* Reprogam FCoE hardware offloads when the traffic class
	 * FCoE is using changes. This happens if the APP info
	 * changes or the up2tc mapping is updated.
	 */
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		adapter->fcoe.up_set = adapter->fcoe.up;
		txgbe_dcbnl_devreset(netdev);
		ret = DCB_HW_CHG_RST;
	}
#endif /* CONFIG_FCOE */

	adapter->dcb_set_bitmap = 0x00;
	return ret;
}
/* Report individual DCB capabilities to the dcbnl core (lldpad). */
static u8 txgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
#ifdef HAVE_DCBNL_IEEE
	struct txgbe_adapter *adapter = netdev_priv(netdev);
#endif

	switch (capid) {
	case DCB_CAP_ATTR_PG:
		*cap = true;
		break;
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
		/* bit 7 set == 8 traffic classes configurable */
		*cap = 0x80;
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 0x80;
		break;
	case DCB_CAP_ATTR_GSP:
		*cap = true;
		break;
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
#ifdef HAVE_DCBNL_IEEE
	case DCB_CAP_ATTR_DCBX:
		*cap = adapter->dcbx_cap;
		break;
#endif
	default:
		*cap = false;
		break;
	}

	return 0;
}
#ifdef NUMTCS_RETURNS_U8
static u8 txgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
#else
static int txgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
#endif
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	/* NOTE(review): rval is u8, so the -EINVAL assigned below is
	 * truncated to a positive value when the int-returning prototype
	 * is in use; callers appear to only test for non-zero — confirm
	 * before relying on the exact error code.
	 */
	u8 rval = 0;

	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) {
		switch (tcid) {
		case DCB_NUMTCS_ATTR_PG:
			*num = adapter->dcb_cfg.num_tcs.pg_tcs;
			break;
		case DCB_NUMTCS_ATTR_PFC:
			*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
			break;
		default:
			rval = -EINVAL;
			break;
		}
	} else {
		/* No TC information unless DCB is enabled */
		rval = -EINVAL;
	}

	return rval;
}
#ifdef NUMTCS_RETURNS_U8
static u8 txgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
#else
static int txgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
#endif
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	/* NOTE(review): as in getnumtcs, u8 rval truncates -EINVAL when
	 * the int-returning prototype is active; also @num is stored
	 * without range validation — confirm the dcbnl core bounds it.
	 */
	u8 rval = 0;

	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) {
		switch (tcid) {
		case DCB_NUMTCS_ATTR_PG:
			adapter->dcb_cfg.num_tcs.pg_tcs = num;
			break;
		case DCB_NUMTCS_ATTR_PFC:
			adapter->dcb_cfg.num_tcs.pfc_tcs = num;
			break;
		default:
			rval = -EINVAL;
			break;
		}
	} else {
		/* TC counts cannot be changed unless DCB is enabled */
		rval = -EINVAL;
	}

	return rval;
}
static u8 txgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Committed PFC mode, as last applied by SET_ALL */
	return adapter->dcb_cfg.pfc_mode_enable;
}
static void txgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);

	/* Staged only; takes effect on the next SET_ALL */
	adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
#ifdef HAVE_DCBNL_OPS_GETAPP
/**
 * txgbe_dcbnl_getapp - retrieve the DCBX application user priority
 * @netdev : the corresponding netdev
 * @idtype : identifies the id as ether type or TCP/UDP port number
 * @id: id is either ether type or TCP/UDP port number
 *
 * Returns : on success, returns a non-zero 802.1p user priority bitmap
 * otherwise returns 0 as the invalid user priority bitmap to indicate an
 * error.
 */
#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT
static int txgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
#else
static u8 txgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
#endif
{
	u8 rval = 0;

#ifdef HAVE_DCBNL_IEEE
	/* Start from the priority recorded in the kernel's app table */
	struct dcb_app app = {
		.selector = idtype,
		.protocol = id,
	};

	rval = dcb_getapp(netdev, &app);
#endif

	switch (idtype) {
	case DCB_APP_IDTYPE_ETHTYPE:
#if IS_ENABLED(CONFIG_FCOE)
		/* FCoE tracks its own current priority; prefer it */
		if (id == ETH_P_FCOE)
			rval = txgbe_fcoe_getapp(netdev);
#endif
		break;
	case DCB_APP_IDTYPE_PORTNUM:
		break;
	default:
		break;
	}

	return rval;
}
/**
 * txgbe_dcbnl_setapp - set the DCBX application user priority
 * @netdev : the corresponding netdev
 * @idtype : identifies the id as ether type or TCP/UDP port number
 * @id: id is either ether type or TCP/UDP port number
 * @up: the 802.1p user priority bitmap
 *
 * Returns : 0 on success or 1 on error
 */
#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT
static int txgbe_dcbnl_setapp(struct net_device *netdev,
#else
static u8 txgbe_dcbnl_setapp(struct net_device *netdev,
#endif
			     u8 idtype, u16 id, u8 up)
{
	int err = 0;
#ifdef HAVE_DCBNL_IEEE
	struct dcb_app app;

	/* Record the entry in the kernel's app table */
	app.selector = idtype;
	app.protocol = id;
	app.priority = up;
	err = dcb_setapp(netdev, &app);
#endif

	switch (idtype) {
	case DCB_APP_IDTYPE_ETHTYPE:
#if IS_ENABLED(CONFIG_FCOE)
		/* Track the FCoE priority: lowest set bit of the bitmap,
		 * or the default priority when the bitmap is empty.
		 */
		if (id == ETH_P_FCOE) {
			struct txgbe_adapter *adapter = netdev_priv(netdev);

			adapter->fcoe.up = up ? ffs(up) - 1 : TXGBE_FCOE_DEFUP;
		}
#endif
		break;
	case DCB_APP_IDTYPE_PORTNUM:
		break;
	default:
		break;
	}

	return err;
}
#endif /* HAVE_DCBNL_OPS_GETAPP */
#ifdef HAVE_DCBNL_IEEE
/* Return the cached IEEE ETS configuration to the dcbnl core. */
static int txgbe_dcbnl_ieee_getets(struct net_device *dev,
				   struct ieee_ets *ets)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	struct ieee_ets *my_ets = adapter->txgbe_ieee_ets;

	/* No IEEE ETS settings cached yet (allocated on first setets) */
	if (!my_ets)
		return -EINVAL;

	ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;
}
/* Apply an IEEE ETS configuration: cache it, resize the TC setup if the
 * number of used TCs changed, and program the hardware ETS registers.
 */
static int txgbe_dcbnl_ieee_setets(struct net_device *dev,
				   struct ieee_ets *ets)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, err = 0;
	__u8 max_tc = 0;
	__u8 map_chg = 0;

	/* Only valid when IEEE DCBX is active */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	/* Lazily allocate the cached IEEE ETS state on first use */
	if (!adapter->txgbe_ieee_ets) {
		adapter->txgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
						  GFP_KERNEL);
		if (!adapter->txgbe_ieee_ets)
			return -ENOMEM;

		/* initialize UP2TC mappings to invalid value */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			adapter->txgbe_ieee_ets->prio_tc[i] =
				IEEE_8021QAZ_MAX_TCS;
		/* if possible update UP2TC mappings from HW */
		TCALL(&adapter->hw, mac.ops.get_rtrup2tc,
		      adapter->txgbe_ieee_ets->prio_tc);
	}

	/* Find the highest referenced TC and whether the map changed */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
		if (ets->prio_tc[i] != adapter->txgbe_ieee_ets->prio_tc[i])
			map_chg = 1;
	}

	/* NOTE(review): the new settings are cached before the max_tc
	 * validation below, so on the -EINVAL path the cached copy has
	 * already been updated — confirm this is intended.
	 */
	memcpy(adapter->txgbe_ieee_ets, ets, sizeof(*adapter->txgbe_ieee_ets));

	/* Convert highest TC index to a TC count */
	if (max_tc)
		max_tc++;

	if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	/* Re-partition queues if the TC count changed; a bare map change
	 * only needs a device reset.
	 */
	if (max_tc != netdev_get_num_tc(dev))
		err = txgbe_setup_tc(dev, max_tc);
	else if (map_chg)
		txgbe_dcbnl_devreset(dev);

	if (err)
		goto err_out;

	err = txgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
	return err;
}
static int txgbe_dcbnl_ieee_getpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	struct ieee_pfc *cur = adapter->txgbe_ieee_pfc;
	int tc;

	/* No IEEE PFC settings cached yet (allocated on first setpfc) */
	if (!cur)
		return -EINVAL;

	pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
	pfc->pfc_en = cur->pfc_en;
	pfc->mbc = cur->mbc;
	pfc->delay = cur->delay;

	/* Per-TC pause counters: Rx XOFF = requests, Tx XOFF = indications */
	for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		pfc->requests[tc] = adapter->stats.pxoffrxc[tc];
		pfc->indications[tc] = adapter->stats.pxofftxc[tc];
	}

	return 0;
}
/* Apply an IEEE PFC configuration: cache it and program the hardware. */
static int txgbe_dcbnl_ieee_setpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	struct txgbe_hw *hw = &adapter->hw;
	u8 *prio_tc;
	int err;

	/* Only valid when IEEE DCBX is active */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	/* Lazily allocate the cached IEEE PFC state on first use */
	if (!adapter->txgbe_ieee_pfc) {
		adapter->txgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
						  GFP_KERNEL);
		if (!adapter->txgbe_ieee_pfc)
			return -ENOMEM;
	}

	/* NOTE(review): assumes txgbe_ieee_ets was already allocated by a
	 * prior setets call; a NULL pointer here would oops — confirm the
	 * dcbnl core guarantees that ordering.
	 */
	prio_tc = adapter->txgbe_ieee_ets->prio_tc;
	memcpy(adapter->txgbe_ieee_pfc, pfc, sizeof(*adapter->txgbe_ieee_pfc));

	/* Enable link flow control parameters if PFC is disabled */
	if (pfc->pfc_en)
		err = txgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc);
	else
		err = TCALL(hw, mac.ops.fc_enable);

	txgbe_set_rx_drop_en(adapter);

	return err;
}
/* dcbnl .ieee_setapp hook: record an IEEE application-priority mapping.
 * For the FCoE ethertype, adopt the new priority and reset the device
 * so the mapping takes effect.
 */
static int txgbe_dcbnl_ieee_setapp(struct net_device *dev,
				   struct dcb_app *app)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	int err = -EINVAL;

	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return err;

	err = dcb_ieee_setapp(dev, app);

#if IS_ENABLED(CONFIG_FCOE)
	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	    app->protocol == ETH_P_FCOE) {
		u8 app_mask = dcb_ieee_getapp_mask(dev, app);

		/* priority already mapped for FCoE: nothing to re-program */
		if (app_mask & (1 << adapter->fcoe.up))
			return err;

		adapter->fcoe.up = app->priority;
		adapter->fcoe.up_set = adapter->fcoe.up;
		txgbe_dcbnl_devreset(dev);
	}
#endif
	/* was "return 0", which silently discarded a dcb_ieee_setapp()
	 * failure; mirror txgbe_dcbnl_ieee_delapp() and propagate it
	 */
	return err;
}
#ifdef HAVE_DCBNL_IEEE_DELAPP
/* dcbnl .ieee_delapp hook: remove an IEEE application-priority mapping.
 * For the FCoE ethertype, re-derive the FCoE user priority from the
 * remaining app entries (falling back to the driver default) and reset
 * the device so the change takes effect.
 */
static int txgbe_dcbnl_ieee_delapp(struct net_device *dev,
				   struct dcb_app *app)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	int err;

	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	err = dcb_ieee_delapp(dev, app);

#if IS_ENABLED(CONFIG_FCOE)
	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	    app->protocol == ETH_P_FCOE) {
		u8 app_mask = dcb_ieee_getapp_mask(dev, app);

		/* current FCoE priority is still mapped: nothing to do */
		if (app_mask & (1 << adapter->fcoe.up))
			return err;

		/* lowest remaining mapped priority, or the driver default */
		adapter->fcoe.up = app_mask ?
				   ffs(app_mask) - 1 : TXGBE_FCOE_DEFUP;
		txgbe_dcbnl_devreset(dev);
	}
#endif
	return err;
}
#endif /* HAVE_DCBNL_IEEE_DELAPP */
/* dcbnl .getdcbx hook: report the currently active DCBX capability flags. */
static u8 txgbe_dcbnl_getdcbx(struct net_device *dev)
{
	struct txgbe_adapter *adapter;

	adapter = netdev_priv(dev);
	return adapter->dcbx_cap;
}
/* dcbnl .setdcbx hook: switch the DCBX operating mode.
 * Returns 1 (failure, per dcbnl convention) for unsupported combinations,
 * 0 on success or when the mode is unchanged.
 */
static u8 txgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);
	struct ieee_ets ets = { .ets_cap = 0 };
	struct ieee_pfc pfc = { .pfc_en = 0 };

	/* no support for LLD_MANAGED modes or CEE+IEEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mode == adapter->dcbx_cap)
		return 0;

	adapter->dcbx_cap = mode;

	/* ETS and PFC defaults */
	ets.ets_cap = 8;
	pfc.pfc_cap = 8;

	if (mode & DCB_CAP_DCBX_VER_IEEE) {
		/* reset IEEE state to defaults */
		txgbe_dcbnl_ieee_setets(dev, &ets);
		txgbe_dcbnl_ieee_setpfc(dev, &pfc);
	} else if (mode & DCB_CAP_DCBX_VER_CEE) {
		/* replay the full cached CEE configuration */
		u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG);

		adapter->dcb_set_bitmap |= mask;
		txgbe_dcbnl_set_all(dev);
	} else {
		/* Drop into single TC mode strict priority as this
		 * indicates CEE and IEEE versions are disabled
		 */
		txgbe_dcbnl_ieee_setets(dev, &ets);
		txgbe_dcbnl_ieee_setpfc(dev, &pfc);
		txgbe_setup_tc(dev, 0);
	}

	return 0;
}
#endif
/* dcbnl callback table exported to the kernel DCB netlink layer.
 * CEE callbacks are always present; the IEEE and app hooks are compiled
 * in only when the running kernel provides the corresponding dcbnl ops.
 */
struct dcbnl_rtnl_ops dcbnl_ops = {
#ifdef HAVE_DCBNL_IEEE
	.ieee_getets	= txgbe_dcbnl_ieee_getets,
	.ieee_setets	= txgbe_dcbnl_ieee_setets,
	.ieee_getpfc	= txgbe_dcbnl_ieee_getpfc,
	.ieee_setpfc	= txgbe_dcbnl_ieee_setpfc,
	.ieee_setapp	= txgbe_dcbnl_ieee_setapp,
#ifdef HAVE_DCBNL_IEEE_DELAPP
	.ieee_delapp	= txgbe_dcbnl_ieee_delapp,
#endif
#endif
	.getstate	= txgbe_dcbnl_get_state,
	.setstate	= txgbe_dcbnl_set_state,
	.getpermhwaddr	= txgbe_dcbnl_get_perm_hw_addr,
	.setpgtccfgtx	= txgbe_dcbnl_set_pg_tc_cfg_tx,
	.setpgbwgcfgtx	= txgbe_dcbnl_set_pg_bwg_cfg_tx,
	.setpgtccfgrx	= txgbe_dcbnl_set_pg_tc_cfg_rx,
	.setpgbwgcfgrx	= txgbe_dcbnl_set_pg_bwg_cfg_rx,
	.getpgtccfgtx	= txgbe_dcbnl_get_pg_tc_cfg_tx,
	.getpgbwgcfgtx	= txgbe_dcbnl_get_pg_bwg_cfg_tx,
	.getpgtccfgrx	= txgbe_dcbnl_get_pg_tc_cfg_rx,
	.getpgbwgcfgrx	= txgbe_dcbnl_get_pg_bwg_cfg_rx,
	.setpfccfg	= txgbe_dcbnl_set_pfc_cfg,
	.getpfccfg	= txgbe_dcbnl_get_pfc_cfg,
	.setall		= txgbe_dcbnl_set_all,
	.getcap		= txgbe_dcbnl_getcap,
	.getnumtcs	= txgbe_dcbnl_getnumtcs,
	.setnumtcs	= txgbe_dcbnl_setnumtcs,
	.getpfcstate	= txgbe_dcbnl_getpfcstate,
	.setpfcstate	= txgbe_dcbnl_setpfcstate,
#ifdef HAVE_DCBNL_OPS_GETAPP
	.getapp		= txgbe_dcbnl_getapp,
	.setapp		= txgbe_dcbnl_setapp,
#endif
#ifdef HAVE_DCBNL_IEEE
	.getdcbx	= txgbe_dcbnl_getdcbx,
	.setdcbx	= txgbe_dcbnl_setdcbx,
#endif
};
#endif /* CONFIG_DCB */

View File

@ -0,0 +1,801 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_debugfs.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#ifdef HAVE_TXGBE_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/module.h>
static struct dentry *txgbe_dbg_root;
static int txgbe_data_mode;
#define TXGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF)
#define TXGBE_DATA_ARGS(dm) ((dm) & 0xFFFF)
enum txgbe_data_func {
TXGBE_FUNC_NONE = (0 << 16),
TXGBE_FUNC_DUMP_BAR = (1 << 16),
TXGBE_FUNC_DUMP_RDESC = (2 << 16),
TXGBE_FUNC_DUMP_TDESC = (3 << 16),
TXGBE_FUNC_FLASH_READ = (4 << 16),
TXGBE_FUNC_FLASH_WRITE = (5 << 16),
};
/**
* data operation
**/
/* Copy a byte range of PCI BAR @res into the user buffer @buf.
 * Reads are performed as aligned 32-bit accesses; the requested range is
 * clamped to the BAR length.  Returns the number of bytes copied and
 * advances *@ppos.
 */
ssize_t
txgbe_simple_read_from_pcibar(struct txgbe_adapter *adapter, int res,
			      void __user *buf, size_t size, loff_t *ppos)
{
	loff_t pos = *ppos;
	u32 miss, len, limit = pci_resource_len(adapter->pdev, res);

	if (pos < 0)
		return 0;

	/* clamp the end of the transfer to the BAR length */
	limit = (pos + size <= limit ? pos + size : limit);
	for (miss = 0; pos < limit && !miss; buf += len, pos += len) {
		u32 val = 0, reg = round_down(pos, 4);
		u32 off = pos - reg;

		/* bytes of this dword inside [pos, limit); the original
		 * "4 - off - (limit - reg - 4)" miscomputed the tail length
		 */
		len = min_t(u32, 4 - off, (u32)(limit - pos));
		val = txgbe_rd32(adapter->io_addr + reg);
		/* byte-wise offset into val: must be u8 * arithmetic --
		 * "&val + off" stepped 4 bytes per unit and read past val
		 */
		miss = copy_to_user(buf, (u8 *)&val + off, len);
	}

	size = pos - *ppos - miss;
	*ppos += size;
	return size;
}
/* Copy a dword-aligned byte range of the flash into the user buffer @buf.
 * Only the fully aligned interior [round_up(pos,4), round_down(pos+size,4))
 * is transferred; unaligned head/tail bytes are skipped.  Returns the
 * number of bytes accounted to the caller, or -EFAULT on copy failure.
 */
ssize_t
txgbe_simple_read_from_flash(struct txgbe_adapter *adapter,
			     void __user *buf, size_t size, loff_t *ppos)
{
	struct txgbe_hw *hw = &adapter->hw;
	loff_t pos = *ppos;
	size_t ret = 0;
	loff_t rpos, rtail;
	void __user *to = buf;
	size_t available = adapter->hw.flash.dword_size << 2;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !size)
		return 0;
	if (size > available - pos)
		size = available - pos;

	rpos = round_up(pos, 4);
	rtail = round_down(pos + size, 4);
	if (rtail < rpos)
		return 0;

	to += rpos - pos;
	/* rtail is exclusive: "rpos <= rtail" copied one dword past the
	 * requested range and overran the user buffer by up to 4 bytes
	 */
	while (rpos < rtail) {
		u32 value = txgbe_rd32(adapter->io_addr + rpos);

		/* NOTE(review): reading through flash.ops.write_buffer looks
		 * intentional (read-back via the flash buffer) -- confirm
		 * against the flash ops implementation
		 */
		if (TCALL(hw, flash.ops.write_buffer, rpos >> 2, 1, &value)) {
			ret = size;
			break;
		}
		/* copy_to_user returns the number of bytes NOT copied;
		 * "4 == copy_to_user(...)" ignored partial (1-3 byte) failures
		 */
		if (copy_to_user(to, &value, 4)) {
			ret = size;
			break;
		}
		to += 4;
		rpos += 4;
	}

	if (ret == size)
		return -EFAULT;

	size -= ret;
	*ppos = pos + size;
	return size;
}
/* Flash write stub: accepts and discards the data.
 * NOTE(review): this reports @size bytes written without touching the
 * flash -- presumably a placeholder; confirm before relying on it.
 */
ssize_t
txgbe_simple_write_to_flash(struct txgbe_adapter *adapter,
			    const void __user *from, size_t size, loff_t *ppos, size_t available)
{
	return size;
}
/* Read handler for the "data" debugfs file.  The mode previously written
 * through "reg_ops" (txgbe_data_mode) selects what is returned: a PCI BAR,
 * the flash contents, or the raw RX/TX descriptor ring of one queue.
 */
static ssize_t
txgbe_dbg_data_ops_read(struct file *filp, char __user *buffer,
			size_t size, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	u32 func = TXGBE_DATA_FUNC(txgbe_data_mode);

	/* order this read of txgbe_data_mode against prior writers */
	rmb();

	switch (func) {
	case TXGBE_FUNC_DUMP_BAR: {
		u32 bar = TXGBE_DATA_ARGS(txgbe_data_mode);

		return txgbe_simple_read_from_pcibar(adapter, bar, buffer, size,
						     ppos);
	}
	case TXGBE_FUNC_FLASH_READ: {
		return txgbe_simple_read_from_flash(adapter, buffer, size, ppos);
	}
	case TXGBE_FUNC_DUMP_RDESC: {
		struct txgbe_ring *ring;
		u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode);

		if (queue >= adapter->num_rx_queues)
			return 0;
		/* translate pool-relative index to an absolute queue index */
		queue += VMDQ_P(0) * adapter->queues_per_pool;
		ring = adapter->rx_ring[queue];
		return simple_read_from_buffer(buffer, size, ppos,
					       ring->desc, ring->size);
	}
	case TXGBE_FUNC_DUMP_TDESC: {
		struct txgbe_ring *ring;
		u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode);

		if (queue >= adapter->num_tx_queues)
			return 0;
		queue += VMDQ_P(0) * adapter->queues_per_pool;
		ring = adapter->tx_ring[queue];
		return simple_read_from_buffer(buffer, size, ppos,
					       ring->desc, ring->size);
	}
	default:
		break;
	}

	return 0;
}
/* Write handler for the "data" debugfs file.  Only meaningful in
 * FLASH_WRITE mode; everything else claims the data was consumed.
 */
static ssize_t
txgbe_dbg_data_ops_write(struct file *filp,
			 const char __user *buffer,
			 size_t size, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	u32 func = TXGBE_DATA_FUNC(txgbe_data_mode);

	/* order this read of txgbe_data_mode against prior writers */
	rmb();

	switch (func) {
	case TXGBE_FUNC_FLASH_WRITE: {
		/* renamed from "size", which shadowed the size_t parameter */
		u32 flash_size = TXGBE_DATA_ARGS(txgbe_data_mode);

		/* clamp to the flash size in bytes */
		if (flash_size > adapter->hw.flash.dword_size << 2)
			flash_size = adapter->hw.flash.dword_size << 2;

		return txgbe_simple_write_to_flash(adapter, buffer, flash_size,
						   ppos, flash_size);
	}
	default:
		break;
	}

	return size;
}
static struct file_operations txgbe_dbg_data_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = txgbe_dbg_data_ops_read,
.write = txgbe_dbg_data_ops_write,
};
/**
 * reg_ops operation
 **/
static char txgbe_dbg_reg_ops_buf[256] = "";

/* Report the current data mode and the last reg_ops command back to the
 * reader as "<netdev>: mode=<hex>\n<last command>\n".
 */
static ssize_t
txgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
		       size_t count, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	char *msg;
	int ret;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	msg = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n",
			adapter->netdev->name, txgbe_data_mode,
			txgbe_dbg_reg_ops_buf);
	if (!msg)
		return -ENOMEM;

	if (count < strlen(msg)) {
		kfree(msg);
		return -ENOSPC;
	}

	ret = simple_read_from_buffer(buffer, count, ppos, msg, strlen(msg));

	kfree(msg);
	return ret;
}
/* Parse and execute a command written to the "reg_ops" debugfs file.
 * Supported commands:
 *   dump [bar|rdesc|tdesc] [arg]  - select the "data" file dump mode
 *                                   (bare "dump" prints rings/registers)
 *   flash [read|write] [arg]      - select the "data" file flash mode
 *   write <reg> <value>           - write a device register
 *   read <reg>                    - read a device register
 */
static ssize_t
txgbe_dbg_reg_ops_write(struct file *filp,
			const char __user *buffer,
			size_t count, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	char *pc = txgbe_dbg_reg_ops_buf;
	int len;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(txgbe_dbg_reg_ops_buf))
		return -ENOSPC;

	len = simple_write_to_buffer(txgbe_dbg_reg_ops_buf,
				     sizeof(txgbe_dbg_reg_ops_buf)-1,
				     ppos,
				     buffer,
				     count);
	if (len < 0)
		return len;
	pc[len] = '\0';

	if (strncmp(pc, "dump", 4) == 0) {
		u32 mode = 0;
		u16 args;

		pc += 4;
		pc += strspn(pc, " \t");
		if (!strncmp(pc, "bar", 3)) {
			pc += 3;
			mode = TXGBE_FUNC_DUMP_BAR;
		} else if (!strncmp(pc, "rdesc", 5)) {
			pc += 5;
			mode = TXGBE_FUNC_DUMP_RDESC;
		} else if (!strncmp(pc, "tdesc", 5)) {
			pc += 5;
			mode = TXGBE_FUNC_DUMP_TDESC;
		} else {
			txgbe_dump(adapter);
		}
		if (mode && 1 == sscanf(pc, "%hu", &args)) {
			mode |= args;
		}
		txgbe_data_mode = mode;
	} else if (strncmp(pc, "flash", 5) == 0) {
		/* was strncmp(pc, "flash", 4): only compared "flas" while
		 * the parser skips 5 characters below
		 */
		u32 mode = 0;
		u16 args;

		pc += 5;
		pc += strspn(pc, " \t");
		if (!strncmp(pc, "read", 4)) {
			/* was length 3 ("rea"), inconsistent with pc += 4 */
			pc += 4;
			mode = TXGBE_FUNC_FLASH_READ;
		} else if (!strncmp(pc, "write", 5)) {
			pc += 5;
			mode = TXGBE_FUNC_FLASH_WRITE;
		}
		if (mode && 1 == sscanf(pc, "%hu", &args)) {
			mode |= args;
		}
		txgbe_data_mode = mode;
	} else if (strncmp(txgbe_dbg_reg_ops_buf, "write", 5) == 0) {
		u32 reg, value;
		int cnt;

		cnt = sscanf(&txgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
		if (cnt == 2) {
			wr32(&adapter->hw, reg, value);
			e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
		} else {
			e_dev_info("write <reg> <value>\n");
		}
	} else if (strncmp(txgbe_dbg_reg_ops_buf, "read", 4) == 0) {
		u32 reg, value;
		int cnt;

		cnt = sscanf(&txgbe_dbg_reg_ops_buf[4], "%x", &reg);
		if (cnt == 1) {
			value = rd32(&adapter->hw, reg);
			e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
		} else {
			e_dev_info("read <reg>\n");
		}
	} else {
		e_dev_info("Unknown command %s\n", txgbe_dbg_reg_ops_buf);
		e_dev_info("Available commands:\n");
		e_dev_info(" read <reg>\n");
		e_dev_info(" write <reg> <value>\n");
	}
	return count;
}
/* file_operations for the "reg_ops" debugfs file */
static const struct file_operations txgbe_dbg_reg_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = txgbe_dbg_reg_ops_read,
	.write = txgbe_dbg_reg_ops_write,
};
/**
 * netdev_ops operation
 **/
static char txgbe_dbg_netdev_ops_buf[256] = "";

/* Report the current data mode and the last netdev_ops command back to
 * the reader as "<netdev>: mode=<hex>\n<last command>\n".
 */
static ssize_t
txgbe_dbg_netdev_ops_read(struct file *filp,
			  char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	char *msg;
	int ret;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	msg = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n",
			adapter->netdev->name, txgbe_data_mode,
			txgbe_dbg_netdev_ops_buf);
	if (!msg)
		return -ENOMEM;

	if (count < strlen(msg)) {
		kfree(msg);
		return -ENOSPC;
	}

	ret = simple_read_from_buffer(buffer, count, ppos, msg, strlen(msg));

	kfree(msg);
	return ret;
}
/* Parse a command written to the "netdev_ops" debugfs file.  The only
 * supported command is "tx_timeout", which invokes the driver's TX
 * timeout handler through whichever netdev interface the kernel offers.
 */
static ssize_t
txgbe_dbg_netdev_ops_write(struct file *filp,
			   const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct txgbe_adapter *adapter = filp->private_data;
	int len;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(txgbe_dbg_netdev_ops_buf))
		return -ENOSPC;

	len = simple_write_to_buffer(txgbe_dbg_netdev_ops_buf,
				     sizeof(txgbe_dbg_netdev_ops_buf)-1,
				     ppos,
				     buffer,
				     count);
	if (len < 0)
		return len;
	txgbe_dbg_netdev_ops_buf[len] = '\0';

	if (strncmp(txgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
		/* call the handler through the interface this kernel has */
#if defined(HAVE_TX_TIMEOUT_TXQUEUE)
		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, 0);
#elif defined(HAVE_NET_DEVICE_OPS)
		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
#else
		adapter->netdev->tx_timeout(adapter->netdev);
#endif /* HAVE_NET_DEVICE_OPS */
		e_dev_info("tx_timeout called\n");
	} else {
		e_dev_info("Unknown command: %s\n", txgbe_dbg_netdev_ops_buf);
		e_dev_info("Available commands:\n");
		e_dev_info(" tx_timeout\n");
	}
	return count;
}
static struct file_operations txgbe_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = txgbe_dbg_netdev_ops_read,
.write = txgbe_dbg_netdev_ops_write,
};
/**
 * txgbe_dbg_adapter_init - setup the debugfs directory for the adapter
 * @adapter: the adapter that is starting up
 *
 * Creates a per-device debugfs directory (named after the PCI device)
 * containing the "data", "reg_ops" and "netdev_ops" files.  Failures are
 * logged but non-fatal.
 **/
void txgbe_dbg_adapter_init(struct txgbe_adapter *adapter)
{
	const char *name = pci_name(adapter->pdev);
	struct dentry *pfile;

	adapter->txgbe_dbg_adapter = debugfs_create_dir(name, txgbe_dbg_root);
	if (!adapter->txgbe_dbg_adapter) {
		e_dev_err("debugfs entry for %s failed\n", name);
		return;
	}

	pfile = debugfs_create_file("data", 0600,
				    adapter->txgbe_dbg_adapter, adapter,
				    &txgbe_dbg_data_ops_fops);
	if (!pfile)
		/* was a copy-paste of the netdev_ops message below */
		e_dev_err("debugfs data for %s failed\n", name);

	pfile = debugfs_create_file("reg_ops", 0600,
				    adapter->txgbe_dbg_adapter, adapter,
				    &txgbe_dbg_reg_ops_fops);
	if (!pfile)
		e_dev_err("debugfs reg_ops for %s failed\n", name);

	pfile = debugfs_create_file("netdev_ops", 0600,
				    adapter->txgbe_dbg_adapter, adapter,
				    &txgbe_dbg_netdev_ops_fops);
	if (!pfile)
		e_dev_err("debugfs netdev_ops for %s failed\n", name);
}
/**
 * txgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
 * @adapter: the adapter that is stopping
 **/
void txgbe_dbg_adapter_exit(struct txgbe_adapter *adapter)
{
	if (adapter->txgbe_dbg_adapter)
		debugfs_remove_recursive(adapter->txgbe_dbg_adapter);
	/* clear so a later exit/init cycle cannot double-remove */
	adapter->txgbe_dbg_adapter = NULL;
}
/**
 * txgbe_dbg_init - start up debugfs for the driver
 *
 * Creates the driver-wide debugfs root under which per-adapter
 * directories are placed.  A failure is logged but not fatal.
 **/
void txgbe_dbg_init(void)
{
	txgbe_dbg_root = debugfs_create_dir(txgbe_driver_name, NULL);
	if (txgbe_dbg_root == NULL)
		pr_err("init of debugfs failed\n");
}
/**
 * txgbe_dbg_exit - clean out the driver's debugfs entries
 *
 * Removes the debugfs root and everything beneath it.
 **/
void txgbe_dbg_exit(void)
{
	debugfs_remove_recursive(txgbe_dbg_root);
}
#endif /* HAVE_TXGBE_DEBUG_FS */
/* register offset/length/name tuple consumed by txgbe_regdump() */
struct txgbe_reg_info {
	u32 offset;
	u32 length;	/* number of 32-bit registers at this offset */
	char *name;
};

/* registers printed by txgbe_dump(); NULL name terminates the table */
static struct txgbe_reg_info txgbe_reg_info_tbl[] = {
	/* General Registers */
	{TXGBE_CFG_PORT_CTL, 1, "CTRL"},
	{TXGBE_CFG_PORT_ST, 1, "STATUS"},

	/* RX Registers */
	/* NOTE(review): TXGBE_PX_RR_CFG(0) appears twice ("SRRCTL" and
	 * "RXDCTL"), mirroring the ixgbe layout where those were distinct
	 * registers -- confirm whether both entries are intended here.
	 */
	{TXGBE_PX_RR_CFG(0), 1, "SRRCTL"},
	{TXGBE_PX_RR_RP(0), 1, "RDH"},
	{TXGBE_PX_RR_WP(0), 1, "RDT"},
	{TXGBE_PX_RR_CFG(0), 1, "RXDCTL"},
	{TXGBE_PX_RR_BAL(0), 1, "RDBAL"},
	{TXGBE_PX_RR_BAH(0), 1, "RDBAH"},

	/* TX Registers */
	{TXGBE_PX_TR_BAL(0), 1, "TDBAL"},
	{TXGBE_PX_TR_BAH(0), 1, "TDBAH"},
	{TXGBE_PX_TR_RP(0), 1, "TDH"},
	{TXGBE_PX_TR_WP(0), 1, "TDT"},
	{TXGBE_PX_TR_CFG(0), 1, "TXDCTL"},

	/* MACVLAN */
	{TXGBE_PSR_MAC_SWC_VM_H, 128, "PSR_MAC_SWC_VM"},
	{TXGBE_PSR_MAC_SWC_AD_L, 128, "PSR_MAC_SWC_AD"},
	{TXGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"},

	/* QoS */
	{TXGBE_TDM_RP_RATE, 128, "TDM_RP_RATE"},

	/* List Terminator */
	{ .name = NULL }
};
/**
 * txgbe_regdump - register printout routine
 *
 * NOTE(review): the entire body is compiled out with "#if 0" (and the
 * print loop is doubly disabled), so this is currently a no-op stub;
 * confirm whether it should be re-enabled or removed.
 **/
static void
txgbe_regdump(struct txgbe_hw *hw, struct txgbe_reg_info *reg_info)
{
#if 0
	int i, n = 0;
	u32 buffer[32*8];

	/* indexed register banks are read through an IDX select register */
	switch (reg_info->offset) {
	case TXGBE_PSR_MAC_SWC_VM_H:
		for (i = 0; i < reg_info->length; i++) {
			wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i);
			buffer[n++] =
				rd32(hw, TXGBE_PSR_MAC_SWC_VM_H);
			buffer[n++] =
				rd32(hw, TXGBE_PSR_MAC_SWC_VM_L);
		}
		break;
	case TXGBE_PSR_MAC_SWC_AD_L:
		for (i = 0; i < reg_info->length; i++) {
			wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i);
			buffer[n++] =
				rd32(hw, TXGBE_PSR_MAC_SWC_AD_H);
			buffer[n++] =
				rd32(hw, TXGBE_PSR_MAC_SWC_AD_L);
		}
		break;
	case TXGBE_TDM_RP_RATE:
		for (i = 0; i < reg_info->length; i++) {
			wr32(hw, TXGBE_TDM_RP_IDX, i);
			buffer[n++] = rd32(hw, TXGBE_TDM_RP_RATE);
		}
		break;
	default:
		/* plain register range: read length consecutive dwords */
		for (i = 0; i < reg_info->length; i++) {
			buffer[n++] = rd32(hw,
					   reg_info->offset + 4*i);
		}
		break;
	}
#if 0
	for (i = 0; n && i < 32; i++) {
		pr_info("%-20s[%02x-%02x]", reg_info->name, i*8, i*8 + 7);
		for (j = 0; n && j < 8; j++, n--)
			pr_cont(" %08x", buffer[i*8 + j]);
		pr_cont("\n");
	}
#endif
	BUG_ON(n);
#endif
}
/**
 * txgbe_dump - Print registers, tx-rings and rx-rings
 *
 * Diagnostic dump gated by the adapter's msg_enable bits: registers are
 * printed when netif_msg_hw() is set, full TX descriptor rings when
 * netif_msg_tx_done() is set, RX rings when netif_msg_rx_status() is set,
 * and packet payloads additionally require netif_msg_pktdata().
 **/
void txgbe_dump(struct txgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct txgbe_hw *hw = &adapter->hw;
	struct txgbe_reg_info *reg_info;
	int n = 0;
	struct txgbe_ring *tx_ring;
	struct txgbe_tx_buffer *tx_buffer;
	union txgbe_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;	/* raw view of a descriptor */
	struct txgbe_ring *rx_ring;
	union txgbe_rx_desc *rx_desc;
	struct txgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reg_info = txgbe_reg_info_tbl; reg_info->name; reg_info++) {
		txgbe_regdump(hw, reg_info);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s %s %s %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		/* summary line describes the next-to-clean buffer */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Transmit Descriptor (Read)
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
	 * +--------------------------------------------------------------+
	 * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
	 *
	 * Transmit Descriptor (Write-Back)
	 * +--------------------------------------------------------------+
	 * 0 | RSV [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 | RSV | STA | RSV |
	 * +--------------------------------------------------------------+
	 * 63 36 35 32 31 0
	 */
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s %s %s %s %s\n",
			"T [desc] [address 63:0 ] ",
			"[PlPOIdStDDt Ln] [bi->dma ] ",
			"leng", "ntw", "timestamp", "bi->skb");
		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = TXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			/* only print slots that hold a mapped buffer */
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X] %016llX %016llX %016llX "
					"%08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				/* annotate ring head/tail positions */
				if (i == tx_ring->next_to_use &&
				    i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");
				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       tx_buffer->skb->data,
						       dma_unmap_len(tx_buffer, len),
						       true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * Receive Descriptor (Read)
	 * 63 1 0
	 * +-----------------------------------------------------+
	 * 0 | Packet Buffer Address [63:1] |A0/NSE|
	 * +----------------------------------------------+------+
	 * 8 | Header Buffer Address [63:1] | DD |
	 * +-----------------------------------------------------+
	 *
	 *
	 * Receive Descriptor (Write-Back)
	 *
	 * 63 48 47 32 31 30 21 20 17 16 4 3 0
	 * +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
	 * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
	 * |/ Flow Dir Flt ID | | | | | |
	 * +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
	 * +------------------------------------------------------+
	 * 63 48 47 32 31 20 19 0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R [desc] [ PktBuf A0] ",
			"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc] [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb ] ",
			"<-- Adv Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = TXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & TXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X] %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT
				pr_info("R [0x%03X] %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->page_dma,
					rx_buffer_info->skb);
				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->page_dma) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       page_address(rx_buffer_info->page) +
						       rx_buffer_info->page_offset,
						       txgbe_rx_bufsz(rx_ring), true);
				}
#endif
			}
			/* annotate ring head/tail positions */
			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");
		}
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,984 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_fcoe.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#if IS_ENABLED(CONFIG_FCOE)
#if IS_ENABLED(CONFIG_DCB)
#include "txgbe_dcb.h"
#endif /* CONFIG_DCB */
#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
 * txgbe_fcoe_clear_ddp - reset a DDP context to its idle state
 * @ddp: ptr to the txgbe_fcoe_ddp to clear
 *
 * Drops the scatter-gather references, the user descriptor list and the
 * bookkeeping fields; err is left set so a stale context is never
 * mistaken for a completed one.
 */
static inline void txgbe_fcoe_clear_ddp(struct txgbe_fcoe_ddp *ddp)
{
	ddp->sgl = NULL;
	ddp->sgc = 0;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->len = 0;
	ddp->err = 1;
}
/**
 * txgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid that corresponding ddp will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int txgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct txgbe_fcoe *fcoe;
	struct txgbe_adapter *adapter;
	struct txgbe_hw *hw;
	struct txgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	/* NOTE(review): range check uses ">"; the setup path uses the same
	 * bound, so the two are at least consistent -- confirm whether
	 * fcoe_ddp_xid is the max valid xid (inclusive) for this device
	 */
	if (xid > netdev->fcoe_ddp_xid)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there an error, force to invalidate ddp context */
	if (ddp->err) {
		/* other hardware requires DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);

		/* clear the filter context for this xid */
		wr32(hw, TXGBE_PSR_FC_FLT_CTXT, 0);
		wr32(hw, TXGBE_PSR_FC_FLT_RW,
		     (xid | TXGBE_PSR_FC_FLT_RW_WE));

		/* clear the DMA buffer context for this xid */
		wr32(hw, TXGBE_RDM_FCBUF, 0);
		wr32(hw, TXGBE_RDM_FCRW,
		     (xid | TXGBE_RDM_FCRW_WE));

		/* read FCBUFF to check context invalidated */
		wr32(hw, TXGBE_RDM_FCRW,
		     (xid | TXGBE_RDM_FCRW_RE));
		fcbuff = rd32(hw, TXGBE_RDM_FCBUF);

		spin_unlock_bh(&fcoe->lock);

		/* guaranteed to be invalidated after 100us */
		if (fcbuff & TXGBE_RDM_FCBUF_VALID)
			udelay(100);
	}

	if (ddp->sgl)
		dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	txgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
/**
 * txgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: non-zero to program the context for FCoE target mode
 *
 * Maps the scatter-gather list for DMA, builds the user descriptor list
 * (UDL) of hardware buffer pointers, and programs the per-xid DMA and
 * filter contexts into the device.
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int txgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct txgbe_adapter *adapter;
	struct txgbe_hw *hw;
	struct txgbe_fcoe *fcoe;
	struct txgbe_fcoe_ddp *ddp;
	struct txgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = TXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcfltctxt;
	dma_addr_t addr = 0;

	if (!netdev || !sgl || !sgc)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid > netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__TXGBE_DOWN, &adapter->state) ||
	    test_bit(__TXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	txgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	/* per-CPU DMA pool; get_cpu() pins us until put_cpu() below */
	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc,
			      DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	/* split each DMA segment into bufflen-sized hardware buffers */
	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= TXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have none-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is then adding another buffer with lastsize = 1.
	 * Since lastsize is 1 there will be no HW access to this buffer.
	 */
	if (lastsize == bufflen) {
		if (j >= TXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	/* compose the hardware context words */
	fcbuff = TXGBE_RDM_FCBUF_SIZE(TXGBE_FCBUFF_4KB) |
		 TXGBE_RDM_FCBUF_COUNT(j) |
		 TXGBE_RDM_FCBUF_OFFSET(firstoff) |
		 TXGBE_RDM_FCBUF_VALID;

	/* Set WRCONTX bit to allow DDP for target */
	fcfltctxt = TXGBE_PSR_FC_FLT_CTXT_VALID;
	if (!target_mode)
		fcfltctxt |= TXGBE_PSR_FC_FLT_CTXT_WR;

	fcdmarw = xid | TXGBE_RDM_FCRW_WE |
		  TXGBE_RDM_FCRW_LASTSIZE(lastsize);

	fcfltrw = xid;
	fcfltrw |= TXGBE_PSR_FC_FLT_RW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode as FCP_RSPtarget is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__TXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__TXGBE_FCOE_TARGET, &fcoe->mode);
		wr32m(hw, TXGBE_PSR_FC_CTL,
		      TXGBE_PSR_FC_CTL_LASTSEQH, TXGBE_PSR_FC_CTL_LASTSEQH);
	}

	/* other devices require DDP lock with direct DDP context access */
	spin_lock_bh(&fcoe->lock);

	wr32(hw, TXGBE_RDM_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	wr32(hw, TXGBE_RDM_FCPTRH, (u64)ddp->udp >> 32);
	wr32(hw, TXGBE_RDM_FCBUF, fcbuff);
	wr32(hw, TXGBE_RDM_FCRW, fcdmarw);
	/* program filter context */
	wr32(hw, TXGBE_PSR_FC_PARAM, 0);
	wr32(hw, TXGBE_PSR_FC_FLT_CTXT, fcfltctxt);
	wr32(hw, TXGBE_PSR_FC_FLT_RW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	txgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
/**
 * txgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int txgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	/* initiator mode: target_mode = 0 */
	return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
/**
 * txgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int txgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	/* target mode: target_mode = 1 */
	return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
/**
 * txgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: txgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * Checks the DDP completion status carried in the rx descriptor for the
 * FC exchange this frame belongs to and updates the matching DDP context.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates is the length of data
 * being ddped.
 */
int txgbe_fcoe_ddp(struct txgbe_adapter *adapter,
		   union txgbe_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	struct txgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	int rc = -EINVAL, ddp_max;
	__le32 fcerr = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCERR);
	__le32 ddp_err;
	u32 fctl;
	u16 xid;
	/* on a bad FC CRC indication hand the frame up with the checksum
	 * marked unverified; otherwise HW has validated it */
	if (fcerr == cpu_to_le32(TXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* verify header contains at least the FCOE header */
	BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN);
	fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr));
	/* step past a VLAN tag if the frame still carries one */
	if (skb->protocol == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN);
	/* the exchange id is OX_ID for responder frames, RX_ID otherwise */
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = ntohs(fh->fh_ox_id);
	else
		xid = ntohs(fh->fh_rx_id);
	ddp_max = TXGBE_FCOE_DDP_MAX;
	if (xid >= ddp_max)
		goto ddp_out;
	/* no context was ever set up for this exchange */
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;
	ddp_err = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCEOFE |
				     TXGBE_RXD_ERR_FCERR);
	if (ddp_err)
		goto ddp_out;
	switch (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		fallthrough;
	/* if DDP length is present pass it through to ULD */
	case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case __constant_cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}
	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back, as this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		struct fcoe_crc_eof *crc;
		skb_linearize(skb);
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}
/**
 * txgbe_fso - txgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE: derives the SOF/EOF context
 * descriptor flags from the frame, accounts for the per-frame replicated
 * header/trailer and writes the Tx context descriptor.
 *
 * Returns : 0 indicates success, < 0 for error
 */
int txgbe_fso(struct txgbe_ring *tx_ring,
	      struct txgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u8 sof, eof;
#ifdef NETIF_F_FSO
	/* only SKB_GSO_FCOE segmentation can be offloaded here */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting "
			"SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}
#endif
	/* resets the header to point fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));
	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = TXGBE_TXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF |
			       TXGBE_TXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N |
					TXGBE_TXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}
	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= TXGBE_TXD_FCOEF_PARINC;
	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);
	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= TXGBE_TX_FLAGS_TSO;
	}
	/* set flag indicating FCOE to txgbe_tx_map call */
	first->tx_flags |= TXGBE_TX_FLAGS_FCOE | TXGBE_TX_FLAGS_CC;
	/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT;
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << TXGBE_TXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK;
	/* write context desc */
	txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  TXGBE_TXD_TUCMD_FCOE, mss_l4len_idx);
	return 0;
}
/**
 * txgbe_fcoe_dma_pool_free - destroy the DDP DMA pool of one cpu
 * @fcoe: ptr to the per-adapter FCoE context
 * @cpu: cpu whose pool is released
 *
 * dma_pool_destroy() is a no-op when passed NULL, so no explicit check
 * is needed; clearing the pointer afterwards keeps repeated calls safe.
 */
static void txgbe_fcoe_dma_pool_free(struct txgbe_fcoe *fcoe, unsigned int cpu)
{
	struct txgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}
/**
 * txgbe_fcoe_dma_pool_alloc - create the DDP DMA pool of one cpu
 * @fcoe: ptr to the per-adapter FCoE context
 * @dev: device the DMA memory is allocated against
 * @cpu: cpu whose pool is created
 *
 * Creates a dma_pool sized for one full user descriptor list
 * (TXGBE_FCPTR_MAX bytes, TXGBE_FCPTR_ALIGN aligned) and resets the
 * per-cpu "no DDP" statistics.
 *
 * Returns : 0 on success, -ENOMEM if the pool could not be created
 */
static int txgbe_fcoe_dma_pool_alloc(struct txgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct txgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	/* use sizeof() instead of repeating the buffer length */
	snprintf(pool_name, sizeof(pool_name), "txgbe_fcoe_ddp_%d", cpu);
	pool = dma_pool_create(pool_name, dev, TXGBE_FCPTR_MAX,
			       TXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;
	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;
	return 0;
}
/**
 * txgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to txgbe adapter
 *
 * This sets up FCoE related registers: the L2 EtherType filters for FCoE
 * and FIP, the Rx queue redirection table and the FCoE Rx control.
 *
 * Returns : none
 */
void txgbe_configure_fcoe(struct txgbe_adapter *adapter)
{
	struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct txgbe_hw *hw = &adapter->hw;
	int i, fcoe_i;
	u32 fcoe_q;
	u32 etqf;
	int fcreta_size;
	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;
	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | TXGBE_PSR_ETYPE_SWC_FCOE |
	       TXGBE_PSR_ETYPE_SWC_FILTER_EN;
	/* with SR-IOV, steer matching frames to the PF's default pool */
	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE;
		etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT;
	}
	wr32(hw,
	     TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE),
	     etqf);
	wr32(hw,
	     TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE),
	     0);
	/* leave remaining registers unconfigured if FCoE is disabled */
	if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED))
		return;
	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = TXGBE_RDB_FCRE_TBL_SIZE;
	for (i = 0; i < fcreta_size; i++) {
		fcoe_i =
			TXGBE_RDB_FCRE_TBL_RING(fcoe->offset + (i % fcoe->indices));
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		wr32(hw, TXGBE_RDB_FCRE_TBL(i), fcoe_q);
	}
	wr32(hw, TXGBE_RDB_FCRE_CTL, TXGBE_RDB_FCRE_CTL_ENA);
	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | TXGBE_PSR_ETYPE_SWC_FILTER_EN;
	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE;
		etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT;
	}
	wr32(hw, TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FIP),
	     etqf);
	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	wr32(hw, TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FIP),
	     TXGBE_RDB_ETYPE_CLS_QUEUE_EN |
	     (fcoe_q << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT));
	/* Configure FCoE Rx control */
	wr32(hw, TXGBE_PSR_FC_CTL,
	     TXGBE_PSR_FC_CTL_FCCRCBO |
	     TXGBE_PSR_FC_CTL_FCOEVER(FC_FCOE_VER) |
	     TXGBE_PSR_FC_CTL_ALLH);
}
/**
 * txgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter : txgbe adapter
 *
 * Cleans up outstanding ddp contexts, the per-cpu DMA pools and the
 * shared extra DDP buffer.
 *
 * Returns : none
 */
void txgbe_free_fcoe_ddp_resources(struct txgbe_adapter *adapter)
{
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;
	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;
	/* put any DDP context that is still outstanding */
	ddp_max = TXGBE_FCOE_DDP_MAX;
	for (i = 0; i < ddp_max; i++)
		txgbe_fcoe_ddp_put(adapter->netdev, i);
	for_each_possible_cpu(cpu)
		txgbe_fcoe_dma_pool_free(fcoe, cpu);
	/* undo the extra-buffer setup done in txgbe_setup_fcoe_ddp_resources() */
	dma_unmap_single(pci_dev_to_dev(adapter->pdev),
			 fcoe->extra_ddp_buffer_dma,
			 TXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);
	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}
/**
 * txgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: txgbe adapter
 *
 * Sets up ddp context resources: one shared extra buffer (a HW
 * workaround) plus a DMA pool for every possible cpu.
 *
 * Returns : 0 indicates success or -ENOMEM on failure
 */
int txgbe_setup_fcoe_ddp_resources(struct txgbe_adapter *adapter)
{
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = pci_dev_to_dev(adapter->pdev);
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;
	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;
	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(TXGBE_FCBUFF_MIN, GFP_ATOMIC);
	if (!buffer) {
		e_err(drv, "failed to allocate extra DDP buffer\n");
		return -ENOMEM;
	}
	dma = dma_map_single(dev, buffer, TXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}
	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;
	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = txgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
		if (!err)
			continue;
		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		/* unwinds the extra buffer and any pools already created */
		txgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}
	return 0;
}
#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter)
#else
static int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter)
#endif
{
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	/* per-cpu DDP bookkeeping is only set up on FCoE-capable hardware */
	if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;
	fcoe->ddp_pool = alloc_percpu(struct txgbe_fcoe_ddp_pool);
	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}
	/* advertise the highest exchange id usable for DDP to the stack */
	adapter->netdev->fcoe_ddp_xid = TXGBE_FCOE_DDP_MAX - 1;
	return 0;
}
#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter)
#else
static void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter)
#endif
{
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	/* tell the stack that no exchange id can be DDPed anymore */
	adapter->netdev->fcoe_ddp_xid = 0;
	if (!fcoe->ddp_pool)
		return;
	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}
#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
/**
 * txgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in sapphire: stops the interface,
 * allocates the DDP tracking state, sets the feature flags and
 * re-partitions the queues before restarting.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int txgbe_fcoe_enable(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_fcoe *fcoe = &adapter->fcoe;
	/* refcnt is taken even on the error paths; txgbe_fcoe_disable()
	 * drops it with atomic_dec_and_test() */
	atomic_inc(&fcoe->refcnt);
	if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;
	if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;
	e_info(drv, "Enabling FCoE offload features.\n");
	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
	/* stop the interface while queues are re-partitioned */
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);
	/* Allocate per CPU memory to track DDP pools */
	/* NOTE(review): the return value is ignored; on allocation failure
	 * FCoE is still enabled and the DDP paths rely on their NULL
	 * ddp_pool checks - consider propagating the error */
	txgbe_fcoe_ddp_enable(adapter);
	/* enable FCoE and notify stack */
	adapter->flags |= TXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);
	/* release existing queues and reallocate them */
	txgbe_clear_interrupt_scheme(adapter);
	txgbe_init_interrupt_scheme(adapter);
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	return 0;
}
/**
 * txgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in sapphire once the last user of the
 * feature releases its reference.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int txgbe_fcoe_disable(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	/* only the final reference holder may actually disable the feature */
	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;
	if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;
	e_info(drv, "Disabling FCoE offload features.\n");
	/* stop the interface while queues are re-partitioned */
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);
	/* Free per CPU memory to track DDP pools */
	txgbe_fcoe_ddp_disable(adapter);
	/* disable FCoE and notify stack */
	adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);
	/* release existing queues and reallocate them */
	txgbe_clear_interrupt_scheme(adapter);
	txgbe_init_interrupt_scheme(adapter);
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	return 0;
}
#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
#if IS_ENABLED(CONFIG_DCB)
#ifdef HAVE_DCBNL_OPS_GETAPP
/**
 * txgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
 * @netdev: the corresponding net_device
 *
 * Builds the 802.1p user priority bitmap from the priority FCoE is
 * currently mapped to.  A bitmap of 0 indicates an error.
 *
 * Returns : 802.1p user priority bitmap for FCoE
 */
u8 txgbe_fcoe_getapp(struct net_device *netdev)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	u8 up = adapter->fcoe.up;

	/* single bit set at the FCoE user priority position */
	return 1 << up;
}
#endif /* HAVE_DCBNL_OPS_GETAPP */
#endif /* CONFIG_DCB */
#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
/**
 * txgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev : txgbe adapter
 * @wwn : the world wide name
 * @type: the type of world wide name
 *
 * Forms the node or port world wide name from the NAA-2 prefix and the
 * SAN MAC address, provided both are valid (NAA-2 for IEEE Extended name
 * identifier, ref. to T10 FC-LS Spec., Sec. 15.3).  @wwn is written only
 * on success.
 *
 * Returns : 0 on success
 */
int txgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	struct txgbe_adapter *adapter = netdev_priv(netdev);
	struct txgbe_mac_info *mac = &adapter->hw.mac;
	u16 prefix = 0xffff;
	u64 name;
	int i;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	/* an all-ones prefix or an invalid SAN MAC means no name exists */
	if (prefix == 0xffff || !is_valid_ether_addr(mac->san_addr))
		return -EINVAL;

	/* 16-bit prefix in the top bits, 48-bit SAN MAC address below it */
	name = (u64)prefix << 48;
	for (i = 0; i < 6; i++)
		name |= (u64)mac->san_addr[i] << (8 * (5 - i));
	*wwn = name;
	return 0;
}
#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */
/**
 * txgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Looks up the netdev priority-to-TC map for the FCoE user priority.
 *
 * Returns : TC that FCoE is mapped to
 */
u8 txgbe_fcoe_get_tc(struct txgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	return netdev_get_prio_tc_map(netdev, adapter->fcoe.up);
}
#endif /* CONFIG_FCOE */

View File

@ -0,0 +1,91 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_fcoe.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _TXGBE_FCOE_H_
#define _TXGBE_FCOE_H_
#if IS_ENABLED(CONFIG_FCOE)
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
/* shift bits within STAT fo FCSTAT */
#define TXGBE_RXD_FCSTAT_SHIFT 4
/* ddp user buffer */
#define TXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
#define TXGBE_FCPTR_ALIGN 16
#define TXGBE_FCPTR_MAX (TXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
#define TXGBE_FCBUFF_4KB 0x0
#define TXGBE_FCBUFF_8KB 0x1
#define TXGBE_FCBUFF_16KB 0x2
#define TXGBE_FCBUFF_64KB 0x3
#define TXGBE_FCBUFF_MAX 65536 /* 64KB max */
#define TXGBE_FCBUFF_MIN 4096 /* 4KB min */
#define TXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
/* Default user priority to use for FCoE */
#define TXGBE_FCOE_DEFUP 3
/* fcerr */
#define TXGBE_FCERR_BADCRC 0x00100000
#define TXGBE_FCERR_EOFSOF 0x00200000
#define TXGBE_FCERR_NOFIRST 0x00300000
#define TXGBE_FCERR_OOOSEQ 0x00400000
#define TXGBE_FCERR_NODMA 0x00500000
#define TXGBE_FCERR_PKTLOST 0x00600000
/* FCoE DDP for target mode */
#define __TXGBE_FCOE_TARGET 1
/* one DDP (direct data placement) context, indexed by FC exchange id */
struct txgbe_fcoe_ddp {
	int len;			/* length of DDPed data, from the rx descriptor */
	u32 err;			/* descriptor error bits recorded at FCPRSP */
	unsigned int sgc;		/* number of scatter-gather entries */
	struct scatterlist *sgl;	/* ULD-provided scatter-gather list */
	dma_addr_t udp;			/* DMA address of the user descriptor list */
	u64 *udl;			/* user descriptor list, from the per-cpu pool */
	struct dma_pool *pool;		/* pool the udl was allocated from */
};
/* per cpu variables */
struct txgbe_fcoe_ddp_pool {
	struct dma_pool *pool;	/* DMA pool for user descriptor lists */
	u64 noddp;		/* stat: DDP not used (maintained by the setup path) */
	u64 noddp_ext_buff;	/* stat: fell back to the shared extra buffer */
};
/* per-adapter FCoE state */
struct txgbe_fcoe {
	struct txgbe_fcoe_ddp_pool __percpu *ddp_pool;	/* per-cpu DDP DMA pools */
	atomic_t refcnt;		/* enable/disable reference count */
	spinlock_t lock;		/* guards DDP context register programming */
	struct txgbe_fcoe_ddp ddp[TXGBE_FCOE_DDP_MAX];	/* contexts indexed by xid */
	void *extra_ddp_buffer;		/* buffer shared by all DDPs (HW workaround) */
	dma_addr_t extra_ddp_buffer_dma;	/* DMA handle of the extra buffer */
	unsigned long mode;		/* mode bits, e.g. __TXGBE_FCOE_TARGET */
	u8 up;				/* 802.1p user priority FCoE is mapped to */
	u8 up_set;			/* presumably: user priority explicitly set - confirm */
};
#endif /* CONFIG_FCOE */
#endif /* _TXGBE_FCOE_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,316 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#ifndef _TXGBE_HW_H_
#define _TXGBE_HW_H_
#define TXGBE_EMC_INTERNAL_DATA 0x00
#define TXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
#define TXGBE_EMC_DIODE1_DATA 0x01
#define TXGBE_EMC_DIODE1_THERM_LIMIT 0x19
#define TXGBE_EMC_DIODE2_DATA 0x23
#define TXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
#define TXGBE_EMC_DIODE3_DATA 0x2A
#define TXGBE_EMC_DIODE3_THERM_LIMIT 0x30
#define SPI_CLK_DIV 2
#define SPI_CMD_ERASE_CHIP 4 // SPI erase chip command
#define SPI_CMD_ERASE_SECTOR 3 // SPI erase sector command
#define SPI_CMD_WRITE_DWORD 0 // SPI write a dword command
#define SPI_CMD_READ_DWORD 1 // SPI read a dword command
#define SPI_CMD_USER_CMD 5 // SPI user command
#define SPI_CLK_CMD_OFFSET 28 // SPI command field offset in Command register
#define SPI_CLK_DIV_OFFSET 25 // SPI clock divide field offset in Command register
#define SPI_TIME_OUT_VALUE 10000
#define SPI_SECTOR_SIZE (4 * 1024) // FLASH sector size is 4KB
#define SPI_H_CMD_REG_ADDR 0x10104 // SPI Command register address
#define SPI_H_DAT_REG_ADDR 0x10108 // SPI Data register address
#define SPI_H_STA_REG_ADDR 0x1010c // SPI Status register address
#define SPI_H_USR_CMD_REG_ADDR 0x10110 // SPI User Command register address
#define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1
#define MISC_RST_REG_ADDR 0x1000c // Misc reset register address
#define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read
#define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH
#define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014
#define MAC_ADDR1_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN1, stored in external FLASH
#define MAC_ADDR1_WORD1_OFFSET_1G 0x0070014
#define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 // Product Serial Number, stored in external FLASH last sector
/* host interface command buffer; the union exposes the same 252-byte
 * payload as bytes, 16-bit words or 32-bit dwords
 * NOTE(review): presumably used to read data via the host interface /
 * manageability block - confirm against the users of this struct */
struct txgbe_hic_read_cab {
	union txgbe_hic_hdr2 hdr;	/* command header */
	union {
		u8 d8[252];	/* payload as bytes */
		u16 d16[126];	/* payload as 16-bit words */
		u32 d32[63];	/* payload as 32-bit dwords */
	} dbuf;
};
/**
* Packet Type decoding
**/
/* txgbe_dec_ptype.mac: outer mac (note: value 1 is unused) */
enum txgbe_dec_ptype_mac {
	TXGBE_DEC_PTYPE_MAC_IP = 0,	/* IP follows the outer MAC header */
	TXGBE_DEC_PTYPE_MAC_L2 = 2,	/* plain L2 frame */
	TXGBE_DEC_PTYPE_MAC_FCOE = 3,	/* FCoE frame */
};
/* txgbe_dec_ptype.[e]ip: outer&encaped ip */
#define TXGBE_DEC_PTYPE_IP_FRAG (0x4)	/* fragment flag, OR-ed onto the base value */
enum txgbe_dec_ptype_ip {
	TXGBE_DEC_PTYPE_IP_NONE = 0,	/* no IP header */
	TXGBE_DEC_PTYPE_IP_IPV4 = 1,
	TXGBE_DEC_PTYPE_IP_IPV6 = 2,
	TXGBE_DEC_PTYPE_IP_FGV4 =	/* fragmented IPv4 */
		(TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4),
	TXGBE_DEC_PTYPE_IP_FGV6 =	/* fragmented IPv6 */
		(TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6),
};
/* txgbe_dec_ptype.etype: encaped type (tunnel encapsulation layout) */
enum txgbe_dec_ptype_etype {
	TXGBE_DEC_PTYPE_ETYPE_NONE = 0,	/* no encapsulation */
	TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */
	TXGBE_DEC_PTYPE_ETYPE_IG = 2,	/* IP+GRE */
	TXGBE_DEC_PTYPE_ETYPE_IGM = 3,	/* IP+GRE+MAC */
	TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */
};
/* txgbe_dec_ptype.proto: payload proto (innermost L4 protocol) */
enum txgbe_dec_ptype_prot {
	TXGBE_DEC_PTYPE_PROT_NONE = 0,	/* no recognized L4 payload */
	TXGBE_DEC_PTYPE_PROT_UDP = 1,
	TXGBE_DEC_PTYPE_PROT_TCP = 2,
	TXGBE_DEC_PTYPE_PROT_SCTP = 3,
	TXGBE_DEC_PTYPE_PROT_ICMP = 4,
	TXGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */
};
/* txgbe_dec_ptype.layer: payload layer (deepest parsed header level) */
enum txgbe_dec_ptype_layer {
	TXGBE_DEC_PTYPE_LAYER_NONE = 0,
	TXGBE_DEC_PTYPE_LAYER_PAY2 = 1,	/* payload starts at layer 2 */
	TXGBE_DEC_PTYPE_LAYER_PAY3 = 2,	/* payload starts at layer 3 */
	TXGBE_DEC_PTYPE_LAYER_PAY4 = 3,	/* payload starts at layer 4 */
};
/* decoded packet type, unpacked from the 8-bit hardware ptype */
struct txgbe_dec_ptype {
	u32 ptype:8;	/* raw hardware packet type value */
	u32 known:1;	/* presumably: set when the ptype decoded - confirm */
	u32 mac:2;	/* outer mac, enum txgbe_dec_ptype_mac */
	u32 ip:3;	/* outer ip, enum txgbe_dec_ptype_ip */
	u32 etype:3;	/* encaped type, enum txgbe_dec_ptype_etype */
	u32 eip:3;	/* encaped ip, enum txgbe_dec_ptype_ip */
	u32 prot:4;	/* payload proto, enum txgbe_dec_ptype_prot */
	u32 layer:3;	/* payload layer, enum txgbe_dec_ptype_layer */
};
typedef struct txgbe_dec_ptype txgbe_dptype;
void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map);
u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw);
s32 txgbe_init_hw(struct txgbe_hw *hw);
s32 txgbe_start_hw(struct txgbe_hw *hw);
s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw);
s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
s32 txgbe_get_bus_info(struct txgbe_hw *hw);
void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status);
void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw);
s32 txgbe_stop_adapter(struct txgbe_hw *hw);
s32 txgbe_led_on(struct txgbe_hw *hw, u32 index);
s32 txgbe_led_off(struct txgbe_hw *hw, u32 index);
s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools,
u32 enable_addr);
s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index);
s32 txgbe_init_rx_addrs(struct txgbe_hw *hw);
s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
txgbe_mc_addr_itr func, bool clear);
s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list,
u32 addr_count, txgbe_mc_addr_itr func);
s32 txgbe_enable_mc(struct txgbe_hw *hw);
s32 txgbe_disable_mc(struct txgbe_hw *hw);
s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw);
s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw);
s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw);
s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw);
s32 txgbe_fc_enable(struct txgbe_hw *hw);
bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw);
void txgbe_fc_autoneg(struct txgbe_hw *hw);
s32 txgbe_setup_fc(struct txgbe_hw *hw);
s32 txgbe_validate_mac_addr(u8 *mac_addr);
s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask);
void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
s32 txgbe_disable_pcie_master(struct txgbe_hw *hw);
s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq);
s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq);
s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 txgbe_clear_vfta(struct txgbe_hw *hw);
s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan);
s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf);
void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf);
void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw,
bool enable, int vf);
s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps);
void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom,
int strategy);
s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
s32 txgbe_reset_hostif(struct txgbe_hw *hw);
u8 txgbe_calculate_checksum(u8 *buffer, u32 length);
s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
void txgbe_clear_tx_pending(struct txgbe_hw *hw);
void txgbe_stop_mac_link_on_d3(struct txgbe_hw *hw);
bool txgbe_mng_present(struct txgbe_hw *hw);
bool txgbe_check_mng_access(struct txgbe_hw *hw);
s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw);
s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
void txgbe_enable_rx(struct txgbe_hw *hw);
void txgbe_disable_rx(struct txgbe_hw *hw);
s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
u32 speed,
bool autoneg_wait_to_complete);
int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit);
#if 0
void txgbe_disable_fdir(struct txgbe_hw *hw);
#endif
/* @txgbe_api.h */
s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl);
s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl,
bool cloud_mode);
s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw,
union txgbe_atr_hash_dword input,
union txgbe_atr_hash_dword common,
u8 queue);
s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw,
union txgbe_atr_input *input_mask, bool cloud_mode);
s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw,
union txgbe_atr_input *input,
u16 soft_id, u8 queue, bool cloud_mode);
s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw,
union txgbe_atr_input *input,
u16 soft_id);
s32 txgbe_fdir_add_perfect_filter(struct txgbe_hw *hw,
union txgbe_atr_input *input,
union txgbe_atr_input *mask,
u16 soft_id,
u8 queue,
bool cloud_mode);
void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
union txgbe_atr_input *mask);
u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
union txgbe_atr_hash_dword common);
s32 txgbe_get_link_capabilities(struct txgbe_hw *hw,
u32 *speed, bool *autoneg);
enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw);
void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw,
u32 speed);
s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u32 speed,
bool autoneg_wait_to_complete);
void txgbe_init_mac_link_ops(struct txgbe_hw *hw);
s32 txgbe_reset_hw(struct txgbe_hw *hw);
s32 txgbe_identify_phy(struct txgbe_hw *hw);
s32 txgbe_init_phy_ops(struct txgbe_hw *hw);
s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval);
s32 txgbe_init_ops(struct txgbe_hw *hw);
s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee);
s32 txgbe_init_flash_params(struct txgbe_hw *hw);
s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset,
u32 dwords, u32 *data);
s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset,
u32 dwords, u32 *data);
s32 txgbe_read_eeprom(struct txgbe_hw *hw,
u16 offset, u16 *data);
s32 txgbe_read_eeprom_buffer(struct txgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 txgbe_init_eeprom_params(struct txgbe_hw *hw);
s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw);
s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw);
s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
u16 *checksum_val);
s32 txgbe_update_flash(struct txgbe_hw *hw);
int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region,
const u8 *data, u32 size);
s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset,
u16 data);
s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data);
u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr);
void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data);
void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data);
u32 rd32_ephy(struct txgbe_hw *hw, u32 addr);
s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region,
const u8 *data, u32 size);
s32 txgbe_close_notify(struct txgbe_hw *hw);
s32 txgbe_open_notify(struct txgbe_hw *hw);
s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg);
s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg);
s32 txgbe_set_link_to_kx(struct txgbe_hw *hw,
u32 speed,
bool autoneg);
int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data);
s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open);
int txgbe_is_lldp(struct txgbe_hw *hw);
#endif /* _TXGBE_HW_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,707 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_mbx.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe_type.h"
#include "txgbe.h"
#include "txgbe_mbx.h"
/**
 * txgbe_read_mbx - Reads a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to read
 *
 * returns 0 if it successfully read a message from the buffer
 **/
int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;

	/* limit read to size of mailbox */
	if (size > mbx->size)
		size = mbx->size;

	/* return the op's result directly; the old TXGBE_ERR_MBX
	 * initializer was a dead store, always overwritten here */
	return TCALL(hw, mbx.ops.read, msg, size, mbx_id);
}
/**
 * txgbe_write_mbx - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @mbx_id: id of mailbox to write
 *
 * returns 0 if it successfully copied message into the buffer
 **/
int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;
	int err = 0;

	/* reject messages larger than the mailbox can carry */
	if (size > mbx->size) {
		err = TXGBE_ERR_MBX;
		ERROR_REPORT2(TXGBE_ERROR_ARGUMENT,
			      "Invalid mailbox message size %d", size);
	} else {
		/* braces on both branches, per kernel coding style */
		err = TCALL(hw, mbx.ops.write, msg, size, mbx_id);
	}

	return err;
}
/**
 * txgbe_check_for_msg - checks to see if someone sent us mail
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns 0 if the Status bit was found or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id)
{
	/* the op supplies the result unconditionally; the old
	 * TXGBE_ERR_MBX pre-initialization was a dead store
	 */
	return TCALL(hw, mbx.ops.check_for_msg, mbx_id);
}
/**
 * txgbe_check_for_ack - checks to see if someone sent us ACK
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns 0 if the Status bit was found or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id)
{
	/* the op supplies the result unconditionally; the old
	 * TXGBE_ERR_MBX pre-initialization was a dead store
	 */
	return TCALL(hw, mbx.ops.check_for_ack, mbx_id);
}
/**
 * txgbe_check_for_rst - checks to see if other side has reset
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns 0 if a reset indication was found or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;

	/* nothing to do if the op was never wired up */
	if (!mbx->ops.check_for_rst)
		return TXGBE_ERR_MBX;

	return mbx->ops.check_for_rst(hw, mbx_id);
}
/**
 * txgbe_poll_for_msg - Wait for message notification
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * Polls check_for_msg up to mbx->timeout times, sleeping mbx->udelay
 * microseconds between attempts.
 *
 * returns 0 if a message notification arrived in time, else TXGBE_ERR_MBX
 **/
int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;
	int attempts = mbx->timeout;

	/* no timeout configured or no op to poll: fail without logging,
	 * matching the original short-circuit path
	 */
	if (!attempts || !mbx->ops.check_for_msg)
		return TXGBE_ERR_MBX;

	for (;;) {
		if (!TCALL(hw, mbx.ops.check_for_msg, mbx_id))
			return 0;
		if (!--attempts)
			break;
		udelay(mbx->udelay);
	}

	ERROR_REPORT2(TXGBE_ERROR_POLLING,
		      "Polling for VF%d mailbox message timedout", mbx_id);
	return TXGBE_ERR_MBX;
}
/**
 * txgbe_poll_for_ack - Wait for message acknowledgement
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to write
 *
 * Polls check_for_ack up to mbx->timeout times, sleeping mbx->udelay
 * microseconds between attempts.
 *
 * returns 0 if an acknowledgement arrived in time, else TXGBE_ERR_MBX
 **/
int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;
	int attempts = mbx->timeout;

	/* no timeout configured or no op to poll: fail without logging,
	 * matching the original short-circuit path
	 */
	if (!attempts || !mbx->ops.check_for_ack)
		return TXGBE_ERR_MBX;

	for (;;) {
		if (!TCALL(hw, mbx.ops.check_for_ack, mbx_id))
			return 0;
		if (!--attempts)
			break;
		udelay(mbx->udelay);
	}

	ERROR_REPORT2(TXGBE_ERROR_POLLING,
		      "Polling for VF%d mailbox ack timedout", mbx_id);
	return TXGBE_ERR_MBX;
}
/**
 * txgbe_read_posted_mbx - Wait for message notification and receive message
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @mbx_id: id of mailbox to write
 *
 * returns 0 if it successfully received a message notification and
 * copied it into the receive buffer.
 **/
int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;
	int err;

	if (!mbx->ops.read)
		return TXGBE_ERR_MBX;

	/* block until a message shows up (or the poll times out) */
	err = txgbe_poll_for_msg(hw, mbx_id);
	if (err)
		return err;

	return TCALL(hw, mbx.ops.read, msg, size, mbx_id);
}
/**
 * txgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @mbx_id: id of mailbox to write
 *
 * returns 0 if it successfully copied message into the buffer and
 * received an ack to that message within delay * timeout period
 **/
int txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size,
			   u16 mbx_id)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;
	int err;

	/* a zero timeout means the channel is not up yet */
	if (!mbx->timeout)
		return TXGBE_ERR_MBX;

	err = TCALL(hw, mbx.ops.write, msg, size, mbx_id);
	if (err)
		return err;

	/* message went out; now wait for the other side to ack it */
	return txgbe_poll_for_ack(hw, mbx_id);
}
/**
 * txgbe_init_mbx_ops - Initialize MB function pointers
 * @hw: pointer to the HW structure
 *
 * Wires up the generic posted read/write helpers; the raw read/write and
 * check ops are installed separately by the PF/VF param initializers.
 **/
void txgbe_init_mbx_ops(struct txgbe_hw *hw)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;

	mbx->ops.write_posted = txgbe_write_posted_mbx;
	mbx->ops.read_posted = txgbe_read_posted_mbx;
}
/**
 * txgbe_read_v2p_mailbox - read v2p mailbox
 * @hw: pointer to the HW structure
 *
 * Reads the VF mailbox register without losing read-to-clear status
 * bits: previously latched flags and the mirrored flags word kept in
 * mailbox memory are folded into the result, and the R2C bits are
 * re-latched into hw->mbx.v2p_mailbox for the next caller.
 **/
u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw)
{
	u32 mailbox = rd32(hw, TXGBE_VXMAILBOX);

	/* merge in flags latched by earlier reads */
	mailbox |= hw->mbx.v2p_mailbox;
	/* read and clear the mirrored flags word in mailbox memory */
	mailbox |= rd32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE);
	wr32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE, 0);
	/* keep the read-to-clear bits around for future calls */
	hw->mbx.v2p_mailbox |= mailbox & TXGBE_VXMAILBOX_R2C_BITS;

	return mailbox;
}
/**
 * txgbe_check_for_bit_vf - Determine if a status bit was set
 * @hw: pointer to the HW structure
 * @mask: bitmask for bits to be tested and cleared
 *
 * Tests @mask against the read-to-clear bits of the V2P mailbox and
 * consumes any matching latched bits.
 *
 * returns 0 if a masked bit was set, else TXGBE_ERR_MBX
 **/
int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask)
{
	u32 mailbox = txgbe_read_v2p_mailbox(hw);

	/* consume the latched copy of the tested bits */
	hw->mbx.v2p_mailbox &= ~mask;

	if (mailbox & mask)
		return 0;
	return TXGBE_ERR_MBX;
}
/**
 * txgbe_check_for_msg_vf - checks to see if the PF has sent mail
 * @hw: pointer to the HW structure
 * @mbx_id: unused on the VF side
 *
 * returns 0 if the PF has set the Status bit or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_PARAMETER(mbx_id);

	/* read-clear the PF status bit */
	if (txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFSTS))
		return TXGBE_ERR_MBX;

	hw->mbx.stats.reqs++;
	return 0;
}
/**
 * txgbe_check_for_ack_vf - checks to see if the PF has ACK'd
 * @hw: pointer to the HW structure
 * @mbx_id: unused on the VF side
 *
 * returns 0 if the PF has set the ACK bit or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_PARAMETER(mbx_id);

	/* read-clear the PF ack bit */
	if (txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFACK))
		return TXGBE_ERR_MBX;

	hw->mbx.stats.acks++;
	return 0;
}
/**
 * txgbe_check_for_rst_vf - checks to see if the PF has reset
 * @hw: pointer to the HW structure
 * @mbx_id: unused on the VF side
 *
 * returns 0 if the PF has signalled reset-in-progress or reset-done,
 * else TXGBE_ERR_MBX
 **/
int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_PARAMETER(mbx_id);

	/* either reset indication counts */
	if (txgbe_check_for_bit_vf(hw, (TXGBE_VXMAILBOX_RSTD |
					TXGBE_VXMAILBOX_RSTI)))
		return TXGBE_ERR_MBX;

	hw->mbx.stats.rsts++;
	return 0;
}
/**
 * txgbe_obtain_mbx_lock_vf - obtain mailbox lock
 * @hw: pointer to the HW structure
 *
 * Claims the shared mailbox buffer by setting VFU, then reads back to
 * confirm the claim stuck.
 *
 * return 0 if we obtained the mailbox lock
 **/
int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw)
{
	/* Take ownership of the buffer */
	wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_VFU);

	/* verify the VFU bit is now visible */
	if (txgbe_read_v2p_mailbox(hw) & TXGBE_VXMAILBOX_VFU)
		return 0;

	ERROR_REPORT2(TXGBE_ERROR_POLLING,
		      "Failed to obtain mailbox lock for VF");
	return TXGBE_ERR_MBX;
}
/**
 * txgbe_write_mbx_vf - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @mbx_id: unused on the VF side
 *
 * Copies @msg into the VF mailbox memory and raises REQ to notify the
 * PF.  The ordering below (lock, flush stale flags, copy, notify) is
 * the mailbox protocol — do not reorder.
 *
 * returns 0 if it successfully copied message into the buffer
 **/
int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
		       u16 mbx_id)
{
	int err;
	u16 i;

	UNREFERENCED_PARAMETER(mbx_id);

	/* lock the mailbox to prevent pf/vf race condition */
	err = txgbe_obtain_mbx_lock_vf(hw);
	if (err)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer;
	 * return values are deliberately ignored — this is a read-to-clear
	 */
	txgbe_check_for_msg_vf(hw, 0);
	txgbe_check_for_ack_vf(hw, 0);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		wr32a(hw, TXGBE_VXMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* Drop VFU and interrupt the PF to tell it a message has been sent */
	wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_REQ);

out_no_write:
	return err;
}
/**
 * txgbe_read_mbx_vf - Reads a message from the inbox intended for vf
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @mbx_id: unused on the VF side
 *
 * returns 0 if it successfully read message from buffer
 **/
int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
		      u16 mbx_id)
{
	int err;
	u16 word;

	UNREFERENCED_PARAMETER(mbx_id);

	/* lock the mailbox to prevent pf/vf race condition */
	err = txgbe_obtain_mbx_lock_vf(hw);
	if (err)
		return err;

	/* pull the message out of the shared mailbox memory */
	for (word = 0; word < size; word++)
		msg[word] = rd32a(hw, TXGBE_VXMBMEM, word);

	/* Acknowledge receipt; this also releases the buffer */
	wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_ACK);

	hw->mbx.stats.msgs_rx++;

	return 0;
}
/**
 * txgbe_init_mbx_params_vf - set initial values for vf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for vf mailbox:
 * installs the VF-side ops, sizes the channel, and zeroes the counters.
 */
void txgbe_init_mbx_params_vf(struct txgbe_hw *hw)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;

	/* start mailbox as timed out and let the reset_hw call set the timeout
	 * value to begin communications */
	mbx->timeout = 0;
	mbx->udelay = TXGBE_VF_MBX_INIT_DELAY;

	/* usable words per message */
	mbx->size = TXGBE_VXMAILBOX_SIZE;

	/* VF-side raw ops plus the shared posted helpers */
	mbx->ops.read = txgbe_read_mbx_vf;
	mbx->ops.write = txgbe_write_mbx_vf;
	mbx->ops.read_posted = txgbe_read_posted_mbx;
	mbx->ops.write_posted = txgbe_write_posted_mbx;
	mbx->ops.check_for_msg = txgbe_check_for_msg_vf;
	mbx->ops.check_for_ack = txgbe_check_for_ack_vf;
	mbx->ops.check_for_rst = txgbe_check_for_rst_vf;

	/* reset traffic statistics */
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;
}
/* Test @mask in MBVFICR bank @index and write-1-to-clear any set bits.
 * Returns 0 when at least one masked bit was set, else TXGBE_ERR_MBX.
 */
int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index)
{
	u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index));

	if (!(mbvficr & mask))
		return TXGBE_ERR_MBX;

	/* write-1-to-clear the serviced bits */
	wr32(hw, TXGBE_MBVFICR(index), mask);
	return 0;
}
/**
 * txgbe_check_for_msg_pf - checks to see if the VF has sent mail
 * @hw: pointer to the HW structure
 * @vf: the VF index
 *
 * returns 0 if the VF has set the Status bit or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf)
{
	/* 16 VFs per MBVFICR register */
	int index = TXGBE_MBVFICR_INDEX(vf);
	u32 vf_bit = vf % 16;

	if (txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
				   index))
		return TXGBE_ERR_MBX;

	hw->mbx.stats.reqs++;
	return 0;
}
/**
 * txgbe_check_for_ack_pf - checks to see if the VF has ACKed
 * @hw: pointer to the HW structure
 * @vf: the VF index
 *
 * returns 0 if the VF has set the ACK bit or else TXGBE_ERR_MBX
 **/
int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf)
{
	/* 16 VFs per MBVFICR register */
	int index = TXGBE_MBVFICR_INDEX(vf);
	u32 vf_bit = vf % 16;

	if (txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit,
				   index))
		return TXGBE_ERR_MBX;

	hw->mbx.stats.acks++;
	return 0;
}
/**
 * txgbe_check_for_rst_pf - checks to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf: the VF index
 *
 * returns 0 if the VF's function-level-reset bit is set, else
 * TXGBE_ERR_MBX
 **/
int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf)
{
	/* 32 VFs per VFLRE register */
	u32 reg_offset = (vf < 32) ? 0 : 1;
	u32 vf_shift = vf % 32;
	u32 vflre = rd32(hw, TXGBE_VFLRE(reg_offset));

	if (!(vflre & (1 << vf_shift)))
		return TXGBE_ERR_MBX;

	/* clear the FLR indication for this VF */
	wr32(hw, TXGBE_VFLREC(reg_offset), (1 << vf_shift));
	hw->mbx.stats.rsts++;
	return 0;
}
/**
 * txgbe_obtain_mbx_lock_pf - obtain mailbox lock
 * @hw: pointer to the HW structure
 * @vf: the VF index
 *
 * Claims the shared buffer by setting PFU, then reads back to confirm
 * the claim stuck.
 *
 * return 0 if we obtained the mailbox lock
 **/
int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf)
{
	u32 mailbox;

	/* Take ownership of the buffer */
	wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU);

	/* verify the PFU bit is now visible */
	mailbox = rd32(hw, TXGBE_PXMAILBOX(vf));
	if (mailbox & TXGBE_PXMAILBOX_PFU)
		return 0;

	ERROR_REPORT2(TXGBE_ERROR_POLLING,
		      "Failed to obtain mailbox lock for PF%d", vf);
	return TXGBE_ERR_MBX;
}
/**
 * txgbe_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @vf: the VF index
 *
 * Copies @msg into the VF's mailbox memory and raises STS to notify
 * the VF.  The ordering (lock, flush stale flags, copy, mirror flags,
 * notify) is the mailbox protocol — do not reorder.
 *
 * returns 0 if it successfully copied message into the buffer
 **/
int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size,
		       u16 vf)
{
	int err;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	err = txgbe_obtain_mbx_lock_pf(hw, vf);
	if (err)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer;
	 * return values are deliberately ignored — this is a read-to-clear
	 */
	txgbe_check_for_msg_pf(hw, vf);
	txgbe_check_for_ack_pf(hw, vf);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		wr32a(hw, TXGBE_PXMBMEM(vf), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	/* set mirrored mailbox flags: the VF folds this extra word in via
	 * txgbe_read_v2p_mailbox() so the flag is not lost to a racing
	 * read-to-clear of the mailbox register
	 */
	wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_STS);
	wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out_no_write:
	return err;
}
/**
 * txgbe_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @vf: the VF index
 *
 * This function copies a message from the mailbox buffer to the caller's
 * memory buffer. The presumption is that the caller knows that there was
 * a message due to a VF request so no polling for message is needed.
 **/
int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size,
		      u16 vf)
{
	int err;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	err = txgbe_obtain_mbx_lock_pf(hw, vf);
	if (err)
		goto out_no_read;

	/* copy the message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = rd32a(hw, TXGBE_PXMBMEM(vf), i);

	/* Acknowledge the message and release buffer */
	/* set mirrored mailbox flags: the VF folds this extra word in via
	 * txgbe_read_v2p_mailbox() so the ack is not lost to a racing
	 * read-to-clear of the mailbox register
	 */
	wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_ACK);
	wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return err;
}
/**
 * txgbe_init_mbx_params_pf - set initial values for pf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for pf mailbox:
 * installs the PF-side ops, sizes the channel, and zeroes the counters.
 */
void txgbe_init_mbx_params_pf(struct txgbe_hw *hw)
{
	struct txgbe_mbx_info *mbx = &hw->mbx;

	/* PF side never blocks on the mailbox: no timeout, no delay */
	mbx->timeout = 0;
	mbx->udelay = 0;

	/* usable words per message */
	mbx->size = TXGBE_VXMAILBOX_SIZE;

	/* PF-side raw ops plus the shared posted helpers */
	mbx->ops.read = txgbe_read_mbx_pf;
	mbx->ops.write = txgbe_write_mbx_pf;
	mbx->ops.read_posted = txgbe_read_posted_mbx;
	mbx->ops.write_posted = txgbe_write_posted_mbx;
	mbx->ops.check_for_msg = txgbe_check_for_msg_pf;
	mbx->ops.check_for_ack = txgbe_check_for_ack_pf;
	mbx->ops.check_for_rst = txgbe_check_for_rst_pf;

	/* reset traffic statistics */
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;
}

View File

@ -0,0 +1,173 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_mbx.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _TXGBE_MBX_H_
#define _TXGBE_MBX_H_
#define TXGBE_VXMAILBOX_SIZE (16 - 1)
/**
* VF Registers
**/
#define TXGBE_VXMAILBOX 0x00600
#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */
#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */
#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */
#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */
#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */
#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */
#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */
#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */
#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD | \
TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK)
#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */
/**
* PF Registers
**/
#define TXGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,63] */
#define TXGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */
#define TXGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */
#define TXGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */
#define TXGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */
#define TXGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/
#define TXGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
#define TXGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */
#define TXGBE_VFLRE(i) (0x004A0 + (4 * (i))) /* i=[0,1] */
#define TXGBE_VFLREC(i) (0x004A8 + (4 * (i))) /* i=[0,1] */
/* SR-IOV specific macros */
#define TXGBE_MBVFICR(i) (0x00480 + (4 * (i))) /* i=[0,3] */
#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4)
#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */
#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */
#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */
#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */
/**
* Messages
**/
/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is TXGBE_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
#define TXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define TXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
* clear to send requests */
#define TXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
/*
* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum txgbe_pfvf_api_rev {
txgbe_mbox_api_null,
txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
txgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
#define TXGBE_VF_RESET 0x01 /* VF requests reset */
#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
/* mailbox API, version 1.0 VF requests */
#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* mailbox API, version 1.1 VF requests */
#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* mailbox API, version 1.2 VF requests */
#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c
#define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
#define TXGBE_VF_GET_FW_VERSION 0x11 /* get fw version */
#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */
/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
enum txgbevf_xcast_modes {
TXGBEVF_XCAST_MODE_NONE = 0,
TXGBEVF_XCAST_MODE_MULTI,
TXGBEVF_XCAST_MODE_ALLMULTI,
TXGBEVF_XCAST_MODE_PROMISC,
};
/* GET_QUEUES return data indices within the mailbox */
#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define TXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
#define TXGBE_VF_MC_TYPE_WORD 3
#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
/* mailbox API, version 2.0 VF requests */
/* NOTE(review): TXGBE_VF_API_NEGOTIATE and TXGBE_VF_GET_QUEUES below are
 * benign re-definitions of the identical v1.x values defined earlier in
 * this header (0x08 / 0x09); consider dropping the duplicates.
 */
#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
/* mailbox API, version 2.0 PF requests */
#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
int txgbe_read_mbx(struct txgbe_hw *, u32 *, u16, u16);
int txgbe_write_mbx(struct txgbe_hw *, u32 *, u16, u16);
int txgbe_read_posted_mbx(struct txgbe_hw *, u32 *, u16, u16);
int txgbe_write_posted_mbx(struct txgbe_hw *, u32 *, u16, u16);
int txgbe_check_for_msg(struct txgbe_hw *, u16);
int txgbe_check_for_ack(struct txgbe_hw *, u16);
int txgbe_check_for_rst(struct txgbe_hw *, u16);
void txgbe_init_mbx_ops(struct txgbe_hw *hw);
void txgbe_init_mbx_params_vf(struct txgbe_hw *);
void txgbe_init_mbx_params_pf(struct txgbe_hw *);
#endif /* _TXGBE_MBX_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,227 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_osdep.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
/* glue for the OS independent part of txgbe
* includes register access macros
*/
#ifndef _TXGBE_OSDEP_H_
#define _TXGBE_OSDEP_H_
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/sched.h>
#include "txgbe_kcompat.h"
#define TXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x)
#define TXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x)
#define TXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x)
#define TXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x)
#define msec_delay(_x) msleep(_x)
#define usec_delay(_x) udelay(_x)
#define STATIC static
#define IOMEM __iomem
#define TXGBE_NAME "txgbe"
/* #define DBG 1 */
#define DPRINTK(nlevel, klevel, fmt, args...) \
((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
printk(KERN_##klevel TXGBE_NAME ": %s: %s: " fmt, \
adapter->netdev->name, \
__func__, ## args)))
#ifndef _WIN32
#define txgbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__)
#define txgbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__)
#define txgbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__)
#define txgbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__)
#define txgbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__)
#define txgbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__)
#define txgbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__)
#define txgbe_print(fmt, ...) printk(KERN_DEBUG fmt, ## __VA_ARGS__)
#define txgbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__)
#else /* _WIN32 */
#define txgbe_error(lvl, fmt, ...) \
DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \
"%s-error: %s@%d, " fmt, \
"txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__)
#endif /* !_WIN32 */
#ifdef DBG
#ifndef _WIN32
#define txgbe_debug(fmt, ...) \
printk(KERN_DEBUG \
"%s-debug: %s@%d, " fmt, \
"txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__)
#else /* _WIN32 */
#define txgbe_debug(fmt, ...) \
DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \
"%s-debug: %s@%d, " fmt, \
"txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__)
#endif /* _WIN32 */
#else /* DBG */
#define txgbe_debug(fmt, ...) do {} while (0)
#endif /* DBG */
#ifdef DBG
#define ASSERT(_x) BUG_ON(!(_x))
#define DEBUGOUT(S) printk(KERN_DEBUG S)
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A)
#define DEBUGFUNC(fmt, ...) txgbe_debug(fmt, ## __VA_ARGS__)
#else
#define ASSERT(_x) do {} while (0)
#define DEBUGOUT(S) do {} while (0)
#define DEBUGOUT1(S, A...) do {} while (0)
#define DEBUGOUT2(S, A...) do {} while (0)
#define DEBUGOUT3(S, A...) do {} while (0)
#define DEBUGOUT4(S, A...) do {} while (0)
#define DEBUGOUT5(S, A...) do {} while (0)
#define DEBUGOUT6(S, A...) do {} while (0)
#define DEBUGFUNC(fmt, ...) do {} while (0)
#endif
#define TXGBE_SFP_DETECT_RETRIES 2
struct txgbe_hw;
struct txgbe_msg {
u16 msg_enable;
};
struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw);
struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
netdev_dbg(txgbe_hw_to_netdev(hw), format, ## arg)
#define hw_err(hw, format, arg...) \
netdev_err(txgbe_hw_to_netdev(hw), format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
#define e_dev_warn(format, arg...) \
dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
#define e_dev_err(format, arg...) \
dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
#define e_dbg(msglvl, format, arg...) \
netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_warn(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_crit(msglvl, format, arg...) \
netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
#define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
#define TXGBE_FAILED_READ_CFG_WORD 0xffffU
#define TXGBE_FAILED_READ_CFG_BYTE 0xffU
extern u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet);
extern u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg);
extern void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value);
#define TXGBE_READ_PCIE_WORD txgbe_read_pci_cfg_word
#define TXGBE_WRITE_PCIE_WORD txgbe_write_pci_cfg_word
#define TXGBE_R32_Q(h, r) txgbe_read_reg(h, r, true)
#ifndef writeq
/* Fallback 64-bit MMIO write as two 32-bit writes, low dword first.
 * No trailing semicolon after "while (0)" so the macro expands to a
 * single statement (the old stray ';' broke "if (x) writeq(...); else"),
 * and (val) is parenthesized before the shift so expression arguments
 * bind correctly.
 * NOTE(review): not atomic like a native writeq — confirm callers
 * tolerate the split write.
 */
#define writeq(val, addr) do { writel((u32)(val), addr); \
			       writel((u32)((val) >> 32), (addr + 4)); \
} while (0)
#endif
#define TXGBE_EEPROM_GRANT_ATTEMPS 100
#define TXGBE_HTONL(_i) htonl(_i)
#define TXGBE_NTOHL(_i) ntohl(_i)
#define TXGBE_NTOHS(_i) ntohs(_i)
#define TXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
#define TXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
enum {
TXGBE_ERROR_SOFTWARE,
TXGBE_ERROR_POLLING,
TXGBE_ERROR_INVALID_STATE,
TXGBE_ERROR_UNSUPPORTED,
TXGBE_ERROR_ARGUMENT,
TXGBE_ERROR_CAUTION,
};
#define ERROR_REPORT(level, format, arg...) do { \
switch (level) { \
case TXGBE_ERROR_SOFTWARE: \
case TXGBE_ERROR_CAUTION: \
case TXGBE_ERROR_POLLING: \
netif_warn(txgbe_hw_to_msg(hw), drv, txgbe_hw_to_netdev(hw), \
format, ## arg); \
break; \
case TXGBE_ERROR_INVALID_STATE: \
case TXGBE_ERROR_UNSUPPORTED: \
case TXGBE_ERROR_ARGUMENT: \
netif_err(txgbe_hw_to_msg(hw), hw, txgbe_hw_to_netdev(hw), \
format, ## arg); \
break; \
default: \
break; \
} \
} while (0)
#define ERROR_REPORT1 ERROR_REPORT
#define ERROR_REPORT2 ERROR_REPORT
#define ERROR_REPORT3 ERROR_REPORT
#define UNREFERENCED_XPARAMETER
#define UNREFERENCED_1PARAMETER(_p) do { \
uninitialized_var(_p); \
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do { \
uninitialized_var(_p); \
uninitialized_var(_q); \
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
uninitialized_var(_p); \
uninitialized_var(_q); \
uninitialized_var(_r); \
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
uninitialized_var(_p); \
uninitialized_var(_q); \
uninitialized_var(_r); \
uninitialized_var(_s); \
} while (0)
#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p)
#endif /* _TXGBE_OSDEP_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,384 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include "txgbe_pcierr.h"
#include "txgbe.h"
#define TXGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \
PCI_ERR_ROOT_CMD_NONFATAL_EN| \
PCI_ERR_ROOT_CMD_FATAL_EN)
#ifndef PCI_ERS_RESULT_NO_AER_DRIVER
/* No AER capabilities registered for the driver */
#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t) 6)
#endif
/* Human-readable names for AER correctable error status bits, indexed by
 * bit position; NULL entries are reserved/unnamed bits.
 */
static const char *aer_correctable_error_string[16] = {
	"RxErr", /* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"BadTLP", /* Bit Position 6 */
	"BadDLLP", /* Bit Position 7 */
	"Rollover", /* Bit Position 8 */
	NULL,
	NULL,
	NULL,
	"Timeout", /* Bit Position 12 */
	"NonFatalErr", /* Bit Position 13 */
	"CorrIntErr", /* Bit Position 14 */
	"HeaderOF", /* Bit Position 15 */
};
/* Human-readable names for AER uncorrectable error status bits, indexed
 * by bit position; NULL entries are reserved/unnamed bits.
 */
static const char *aer_uncorrectable_error_string[27] = {
	"Undefined", /* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	"DLP", /* Bit Position 4 */
	"SDES", /* Bit Position 5 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"TLP", /* Bit Position 12 */
	"FCP", /* Bit Position 13 */
	"CmpltTO", /* Bit Position 14 */
	"CmpltAbrt", /* Bit Position 15 */
	"UnxCmplt", /* Bit Position 16 */
	"RxOF", /* Bit Position 17 */
	"MalfTLP", /* Bit Position 18 */
	"ECRC", /* Bit Position 19 */
	"UnsupReq", /* Bit Position 20 */
	"ACSViol", /* Bit Position 21 */
	"UncorrIntErr", /* Bit Position 22 */
	"BlockedTLP", /* Bit Position 23 */
	"AtomicOpBlocked", /* Bit Position 24 */
	"TLPBlockedErr", /* Bit Position 25 */
	"PoisonTLPBlocked", /* Bit Position 26 */
};
#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
/* redefinition because centos 6 can't use pci_walk_bus in pci.h */
/*
 * NOTE(review): this local pci_bus_sem is defined here but never
 * initialized with init_rwsem() - confirm it is initialized somewhere
 * before pci_walk_bus() is first called, otherwise down_read() operates
 * on an uninitialized rw_semaphore.
 */
struct rw_semaphore pci_bus_sem;
/** pci_walk_bus - walk devices on/under bus, calling callback.
 * @top bus whose devices should be walked
 * @cb callback to be called for each device found
 * @userdata arbitrary pointer to be passed to callback.
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus. Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time. If it returns anything
 * other than 0, we break out.
 *
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
		  void *userdata)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	down_read(&pci_bus_sem);
	/* iterative depth-first traversal of the device list */
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;
		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	up_read(&pci_bus_sem);
}
#endif
/*
 * Fold one device's recovery vote into the accumulated result.
 * Severity ordering: NO_AER_DRIVER poisons everything; NONE is a
 * non-opinion; an optimistic accumulated state adopts the new vote;
 * DISCONNECT may only be upgraded to NEED_RESET.
 */
static pci_ers_result_t merge_result(enum pci_ers_result orig,
				     enum pci_ers_result new)
{
	/* a device without an AER driver poisons the whole subtree */
	if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
		return PCI_ERS_RESULT_NO_AER_DRIVER;

	/* "no opinion" never changes the accumulated verdict */
	if (new == PCI_ERS_RESULT_NONE)
		return orig;

	/* optimistic states simply adopt the newest vote */
	if (orig == PCI_ERS_RESULT_CAN_RECOVER ||
	    orig == PCI_ERS_RESULT_RECOVERED)
		return new;

	/* DISCONNECT may only be escalated to NEED_RESET */
	if (orig == PCI_ERS_RESULT_DISCONNECT &&
	    new == PCI_ERS_RESULT_NEED_RESET)
		return PCI_ERS_RESULT_NEED_RESET;

	return orig;
}
/*
 * Deliver the error_detected() callback to one device and fold its vote
 * into the accumulated recovery status (*result).
 * Non-bridge devices without an error_detected() handler vote
 * PCI_ERS_RESULT_NO_AER_DRIVER, which poisons the final result.
 * Always returns 0 so pci_walk_bus() continues iterating.
 */
static int txgbe_report_error_detected(struct pci_dev *dev,
				       pci_channel_state_t state,
				       enum pci_ers_result *result)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	if (
		!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		/*
		 * If any device in the subtree does not have an error_detected
		 * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
		 * error callbacks of "any" device in the subtree, and will
		 * exit in the disconnected error state.
		 */
		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, state);
	}
	*result = merge_result(*result, vote);
	device_unlock(&dev->dev);
	return 0;
}
/* pci_walk_bus() callback: report pci_channel_io_frozen to one device. */
static int txgbe_report_frozen_detected(struct pci_dev *dev, void *data)
{
	return txgbe_report_error_detected(dev, pci_channel_io_frozen, data);
}
static int txgbe_report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
*result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
}
static int txgbe_report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
*result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
}
static int txgbe_report_resume(struct pci_dev *dev, void *data)
{
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
if (
!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
out:
device_unlock(&dev->dev);
return 0;
}
/*
 * Attempt PCIe error recovery via a secondary bus reset on the hierarchy
 * below the first upstream root/downstream port of @dev:
 *  1. broadcast error_detected(frozen) to every device under the port,
 *  2. mask the root port's AER error-message interrupts,
 *  3. pulse PCI_BRIDGE_CTL_BUS_RESET on the port,
 *  4. poll config space until the port answers again (up to ~60 s),
 *  5. clear/re-arm root AER status, then broadcast mmio_enabled /
 *     slot_reset / resume callbacks as the accumulated status allows.
 */
void txgbe_pcie_do_recovery(struct pci_dev *dev)
{
	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
	struct pci_bus *bus;
	u32 reg32;
	int pos;
	int delay = 1;	/* config-read poll interval in ms, doubled each try */
	u32 id;
	u16 ctrl;

	/*
	 * Error recovery runs on all subordinates of the first downstream port.
	 * If the downstream port detected the error, it is cleared at the end.
	 */
	if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
	      pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
		dev = dev->bus->self;
	bus = dev->subordinate;

	pci_walk_bus(bus, txgbe_report_frozen_detected, &status);

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		/* Disable Root's interrupt in response to error messages */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 &= ~TXGBE_ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
	}

	/* Pulse the secondary bus reset bit on the bridge. */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
	msleep(2);
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
	ssleep(1);

	/*
	 * Poll config space until the port responds again; an all-ones
	 * dword read means it is still not ready.  Back off exponentially
	 * up to a ~60 s total budget.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > 60000) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, "bus_reset");
			return;
		}
		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, "bus_reset");
		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}
	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 "bus_reset");
	pci_info(dev, "Root Port link has been reset\n");

	if (pos) {
		/* Clear Root Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);

		/* Enable Root Port's interrupt in response to error messages */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
		reg32 |= TXGBE_ROOT_PORT_INTR_ON_MESG_MASK;
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER) {
		status = PCI_ERS_RESULT_RECOVERED;
		pci_dbg(dev, "broadcast mmio_enabled message\n");
		pci_walk_bus(bus, txgbe_report_mmio_enabled, &status);
	}

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = PCI_ERS_RESULT_RECOVERED;
		pci_dbg(dev, "broadcast slot_reset message\n");
		pci_walk_bus(bus, txgbe_report_slot_reset, &status);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	pci_dbg(dev, "broadcast resume message\n");
	pci_walk_bus(bus, txgbe_report_resume, &status);

failed:
	/* nothing further to do; a failed recovery was already logged */
	;
}
/*
 * Log the name of every error bit set in @status.  @severity selects the
 * correctable or uncorrectable AER name table; reserved (NULL-named) bits
 * are skipped.  An uncorrectable completion timeout (bit 14, "CmpltTO")
 * additionally sets adapter->cmplt_to_dis.
 */
void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long bits = status;
	const char * const *table;
	size_t table_len;
	unsigned long bit;

	if (severity == TXGBE_AER_CORRECTABLE) {
		table = aer_correctable_error_string;
		table_len = ARRAY_SIZE(aer_correctable_error_string);
	} else {
		table = aer_uncorrectable_error_string;
		table_len = ARRAY_SIZE(aer_uncorrectable_error_string);
	}

	for_each_set_bit(bit, &bits, 32) {
		const char *name = bit < table_len ? table[bit] : NULL;

		/* completion timeout (uncorrectable bit 14) is remembered */
		if (severity != TXGBE_AER_CORRECTABLE && name && bit == 14)
			adapter->cmplt_to_dis = true;
		if (name)
			dev_info(&pdev->dev, " [%2ld] %-22s\n", bit, name);
	}
}
/*
 * Decide whether this platform is one on which the driver may attempt its
 * own PCIe bus-reset recovery.  Always true on x86; otherwise only when
 * the upstream bridge is a PLX (0x10b5) or HiSilicon (0x19e5) part.
 */
bool txgbe_check_recovery_capability(struct pci_dev *dev)
{
#if defined(__i386__) || defined(__x86_64__)
	return true;
#else
	/* check upstream bridge is root or PLX bridge,
	 * or cpu is kunpeng 920 or not
	 */
	/*
	 * NOTE(review): dev->bus->self is NULL for a device sitting directly
	 * on a root bus - confirm callers never pass such a device here.
	 */
	if (dev->bus->self->vendor == 0x10b5 ||
	    dev->bus->self->vendor == 0x19e5)
		return true;
	else
		return false;
#endif
}

View File

@ -0,0 +1,14 @@
#ifndef _TXGBE_PCIERR_H_
#define _TXGBE_PCIERR_H_

#include "txgbe.h"

/* Severity selectors for txgbe_aer_print_error() */
#define TXGBE_AER_UNCORRECTABLE 1
#define TXGBE_AER_CORRECTABLE 2

/* Run AER recovery (secondary bus reset) for the hierarchy below @dev */
void txgbe_pcie_do_recovery(struct pci_dev *dev);
/* Log the names of the AER error bits set in @status */
void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status);
/* True when the platform supports driver-initiated bus-reset recovery */
bool txgbe_check_recovery_capability(struct pci_dev *dev);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,213 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_phy.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
#include "txgbe_type.h"
#define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0
#define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
#define TXGBE_I2C_EEPROM_BANK_LEN 0xFF
/*fiber to copper module inter reg i2c addr */
#define TXGBE_I2C_EEPROM_DEV_ADDR3 0xAC
#define TXGBE_I2C_PHY_LOCAL_RX_STATUS BIT(12)
#define TXGBE_I2C_PHY_REMOTE_RX_STATUS BIT(13)
#define TXGBE_I2C_10G_SFP_LINK_STATUS BIT(10)
/* EEPROM byte offsets */
#define TXGBE_SFF_IDENTIFIER 0x0
#define TXGBE_SFF_IDENTIFIER_SFP 0x3
#define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25
#define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26
#define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define TXGBE_SFF_1GBE_COMP_CODES 0x6
#define TXGBE_SFF_10GBE_COMP_CODES 0x3
#define TXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define TXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define TXGBE_SFF_DDM_IMPLEMENTED 0x40
#define TXGBE_SFF_SFF_8472_SWAP 0x5C
#define TXGBE_SFF_SFF_8472_COMP 0x5E
#define TXGBE_SFF_SFF_8472_OSCB 0x6E
#define TXGBE_SFF_SFF_8472_ESCB 0x76
#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
#define TXGBE_SFF_QSFP_CONNECTOR 0x82
#define TXGBE_SFF_QSFP_10GBE_COMP 0x83
#define TXGBE_SFF_QSFP_1GBE_COMP 0x86
#define TXGBE_SFF_QSFP_CABLE_LENGTH 0x92
#define TXGBE_SFF_QSFP_DEVICE_TECH 0x93
#define TXGBE_SFF_CABLE_VENDOR_NAME1 0x14
#define TXGBE_SFF_CABLE_VENDOR_NAME2 0x15
#define TXGBE_SFF_CABLE_VENDOR_NAME3 0x16
/* Bitmasks */
#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define TXGBE_SFF_1GBASESX_CAPABLE 0x1
#define TXGBE_SFF_1GBASELX_CAPABLE 0x2
#define TXGBE_SFF_1GBASET_CAPABLE 0x8
#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
#define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define TXGBE_SFF_ADDRESSING_MODE 0x4
#define TXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define TXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define TXGBE_I2C_EEPROM_READ_MASK 0x100
#define TXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define TXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
#define TXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define TXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define TXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define TXGBE_CS4227 0xBE /* CS4227 address */
#define TXGBE_CS4227_GLOBAL_ID_LSB 0
#define TXGBE_CS4227_SCRATCH 2
#define TXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
#define TXGBE_CS4227_SCRATCH_VALUE 0x5aa5
#define TXGBE_CS4227_RETRIES 5
#define TXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */
#define TXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */
#define TXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */
#define TXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
#define TXGBE_CS4227_EDC_MODE_CX1 0x0002
#define TXGBE_CS4227_EDC_MODE_SR 0x0004
#define TXGBE_CS4227_RESET_HOLD 500 /* microseconds */
#define TXGBE_CS4227_RESET_DELAY 500 /* milliseconds */
#define TXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
#define TXGBE_PE 0xE0 /* Port expander address */
#define TXGBE_PE_OUTPUT 1 /* Output register offset */
#define TXGBE_PE_CONFIG 3 /* Config register offset */
#define TXGBE_PE_BIT1 (1 << 1)
/* Flow control defines */
#define TXGBE_TAF_SYM_PAUSE (0x1)
#define TXGBE_TAF_ASM_PAUSE (0x2)
/* Bit-shift macros */
#define TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
#define TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
#define TXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
#define TXGBE_SFF_VENDOR_OUI_FTL 0x00906500
#define TXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
#define TXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
/* I2C SDA and SCL timing parameters for standard mode */
#define TXGBE_I2C_T_HD_STA 4
#define TXGBE_I2C_T_LOW 5
#define TXGBE_I2C_T_HIGH 4
#define TXGBE_I2C_T_SU_STA 5
#define TXGBE_I2C_T_HD_DATA 5
#define TXGBE_I2C_T_SU_DATA 1
#define TXGBE_I2C_T_RISE 1
#define TXGBE_I2C_T_FALL 1
#define TXGBE_I2C_T_SU_STO 4
#define TXGBE_I2C_T_BUF 5
#ifndef TXGBE_SFP_DETECT_RETRIES
#define TXGBE_SFP_DETECT_RETRIES 10
#endif /* TXGBE_SFP_DETECT_RETRIES */
/* SFP+ SFF-8472 Compliance */
#define TXGBE_SFF_SFF_8472_UNSUP 0x00
enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw);
s32 txgbe_get_phy_id(struct txgbe_hw *hw);
s32 txgbe_reset_phy(struct txgbe_hw *hw);
s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 *phy_data);
s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 phy_data);
s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete);
u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw,
u32 speed,
bool autoneg_wait_to_complete);
s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw,
u32 *speed,
bool *autoneg);
s32 txgbe_check_reset_blocked(struct txgbe_hw *hw);
s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw,
u16 *firmware_version);
s32 txgbe_identify_module(struct txgbe_hw *hw);
s32 txgbe_identify_sfp_module(struct txgbe_hw *hw);
s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw);
s32 txgbe_init_i2c(struct txgbe_hw *hw);
s32 txgbe_clear_i2c(struct txgbe_hw *hw);
s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr);
s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset,
u8 dev_addr, u16 *data);
s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset,
u16 *data);
s32 txgbe_init_external_phy(struct txgbe_hw *hw);
s32 txgbe_uninit_external_phy(struct txgbe_hw *hw);
s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit);
s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit);
s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit);
MTD_STATUS txgbe_read_mdio(
MTD_DEV * dev,
MTD_U16 port,
MTD_U16 mmd,
MTD_U16 reg,
MTD_U16 *value);
MTD_STATUS txgbe_write_mdio(
MTD_DEV * dev,
MTD_U16 port,
MTD_U16 mmd,
MTD_U16 reg,
MTD_U16 value);
#endif /* _TXGBE_PHY_H_ */

View File

@ -0,0 +1,929 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_procfs.h, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#include "txgbe_hw.h"
#include "txgbe_type.h"
#ifdef TXGBE_PROCFS
#ifndef TXGBE_SYSFS
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/netdevice.h>
static struct proc_dir_entry *txgbe_top_dir;
/*
 * Return the current net_device_stats for @netdev, or NULL if @netdev is
 * NULL.  Depending on the kernel, the stats live either in the net_device
 * itself or in the adapter private structure.
 */
static struct net_device_stats *procfs_get_stats(struct net_device *netdev)
{
#ifndef HAVE_NETDEV_STATS_IN_NETDEV
	struct txgbe_adapter *adapter;
#endif
	if (netdev == NULL)
		return NULL;
#ifdef HAVE_NETDEV_STATS_IN_NETDEV
	/* only return the current stats */
	return &netdev->stats;
#else
	adapter = netdev_priv(netdev);
	/* only return the current stats */
	return &adapter->net_stats;
#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
}
/* /proc read handler: print the firmware/EEPROM identification string. */
static int txgbe_fwbanner(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = data;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	return snprintf(page, count, "%s\n", adapter->eeprom_id);
}
/*
 * /proc read handler.
 * NOTE(review): despite the name "porttype", this prints the __TXGBE_DOWN
 * state bit (1 = interface down, 0 = up) - confirm consumers expect that.
 */
static int txgbe_porttype(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	return snprintf(page, count, "%d\n",
			test_bit(__TXGBE_DOWN, &adapter->state));
}
/*
 * /proc read handler: print the link speed in units of 100 Mb/s
 * (1 = 100M, 10 = 1G, 100 = 10G, 0 = unknown/no link).
 */
static int txgbe_portspeed(char *page, char __always_unused **start,
			   off_t __always_unused off, int count,
			   int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = data;
	int speed;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	if (adapter->link_speed == TXGBE_LINK_SPEED_100_FULL)
		speed = 1;
	else if (adapter->link_speed == TXGBE_LINK_SPEED_1GB_FULL)
		speed = 10;
	else if (adapter->link_speed == TXGBE_LINK_SPEED_10GB_FULL)
		speed = 100;
	else
		speed = 0;

	return snprintf(page, count, "%d\n", speed);
}
/* /proc read handler: print the Wake-on-LAN configuration flags. */
static int txgbe_wqlflag(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	return snprintf(page, count, "%d\n", adapter->wol);
}
/* /proc read handler: print the current flow-control mode. */
static int txgbe_xflowctl(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct txgbe_hw *hw;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	/* hw is the address of an embedded member, so this check is
	 * always false; kept for consistency with the other handlers */
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n", hw->fc.current_mode);
}
/* /proc read handler: Rx packets dropped, from net_device stats. */
static int txgbe_rxdrops(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->rx_dropped);
}
/* /proc read handler: Rx error count, from net_device stats. */
static int txgbe_rxerrors(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n", net_stats->rx_errors);
}
/*
 * /proc read handler: reads the TXGBE_TPR hardware register.
 * NOTE(review): handler name says "unicast packets" but TPR looks like a
 * total-packets-received counter - confirm against the register map.
 */
static int txgbe_rxupacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n", rd32(hw, TXGBE_TPR));
}
/* /proc read handler: sum of per-queue multicast Rx counters (128 queues). */
static int txgbe_rxmpacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	int i, mprc = 0;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	for (i = 0; i < 128; i++)
		mprc += rd32(hw, TXGBE_PX_MPRC(i));
	return snprintf(page, count, "%d\n", mprc);
}
/* /proc read handler: broadcast frames received (hardware counter). */
static int txgbe_rxbpacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n",
			rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW));
}
/*
 * /proc read handler: frames transmitted (hardware counter).
 * NOTE(review): handler name says "unicast" but the register counts
 * good+bad Tx frames - confirm against the register map.
 */
static int txgbe_txupacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n",
			rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW));
}
/* /proc read handler: multicast frames transmitted (hardware counter). */
static int txgbe_txmpacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n",
			rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW));
}
/* /proc read handler: broadcast frames transmitted (hardware counter). */
static int txgbe_txbpacks(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "%d\n",
			rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW));
}
/* /proc read handler: Tx error count, from net_device stats. */
static int txgbe_txerrors(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->tx_errors);
}
/* /proc read handler: Tx packets dropped, from net_device stats. */
static int txgbe_txdrops(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->tx_dropped);
}
/* /proc read handler: Rx packet count, from net_device stats. */
static int txgbe_rxframes(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->rx_packets);
}
/* /proc read handler: Rx byte count, from net_device stats. */
static int txgbe_rxbytes(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->rx_bytes);
}
/* /proc read handler: Tx packet count, from net_device stats. */
static int txgbe_txframes(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->tx_packets);
}
/* /proc read handler: Tx byte count, from net_device stats. */
static int txgbe_txbytes(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device_stats *net_stats;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	net_stats = procfs_get_stats(adapter->netdev);
	if (net_stats == NULL)
		return snprintf(page, count, "error: no net stats\n");
	return snprintf(page, count, "%lu\n",
			net_stats->tx_bytes);
}
/*
 * /proc read handler: print a link-status bitmask:
 *   bit 0 - interface administratively up (__TXGBE_DOWN clear)
 *   bit 1 - physical link up
 *   bit 2 - a link-state-change interrupt occurred since the last read
 *
 * BUG FIX: the check_link condition was missing its closing parenthesis
 * ("if (TCALL(...)" ), leaving the if-statement unbalanced and the file
 * uncompilable; restored the intended "assume link up when check_link
 * reports an error / is not implemented" behavior.
 */
static int txgbe_linkstat(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_hw *hw;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	int bitmask = 0;
	u32 link_speed;
	bool link_up = false;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");

	if (!test_bit(__TXGBE_DOWN, &adapter->state))
		bitmask |= 1;

	if (TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false))
		/* always assume link is up, if no check link function */
		link_up = true;
	if (link_up)
		bitmask |= 2;

	if (adapter->old_lsc != adapter->lsc_int) {
		bitmask |= 4;
		adapter->old_lsc = adapter->lsc_int;
	}

	return snprintf(page, count, "0x%X\n", bitmask);
}
/* /proc read handler: print the PCI function number of this port. */
static int txgbe_funcid(char *page, char __always_unused **start,
			off_t __always_unused off, int count,
			int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct txgbe_hw *hw;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");
	return snprintf(page, count, "0x%X\n", hw->bus.func);
}
/* /proc read handler: print the driver version string. */
static int txgbe_funcvers(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void __always_unused *data)
{
	return snprintf(page, count, "%s\n", txgbe_driver_version);
}
/* /proc read handler: print the factory-burned (permanent) MAC address. */
static int txgbe_macburn(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = data;
	struct txgbe_hw *hw;
	const u8 *mac;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (!hw)
		return snprintf(page, count, "error: no hw data\n");

	mac = hw->mac.perm_addr;
	return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
			(unsigned int)mac[0], (unsigned int)mac[1],
			(unsigned int)mac[2], (unsigned int)mac[3],
			(unsigned int)mac[4], (unsigned int)mac[5]);
}
/* /proc read handler: print the currently administered MAC address. */
static int txgbe_macadmn(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = data;
	struct txgbe_hw *hw;
	const u8 *mac;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (!hw)
		return snprintf(page, count, "error: no hw data\n");

	mac = hw->mac.addr;
	return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
			(unsigned int)mac[0], (unsigned int)mac[1],
			(unsigned int)mac[2], (unsigned int)mac[3],
			(unsigned int)mac[4], (unsigned int)mac[5]);
}
/*
 * /proc read handler: print the LAA (locally administered address) stored
 * in the EEPROM.  EEPROM word 0x37 holds a pointer to a 6-word region
 * containing one 3-word (48-bit) address per PCI function; 0x0000/0xFFFF
 * means the pointer is unprogrammed and zeros are reported instead.
 */
static int txgbe_maclla1(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct txgbe_hw *hw;
	int rc;
	u16 eeprom_buff[6];
	u16 first_word = 0x37;	/* EEPROM offset of the LAA region pointer */
	const u16 word_count = ARRAY_SIZE(eeprom_buff);

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	hw = &adapter->hw;
	if (hw == NULL)
		return snprintf(page, count, "error: no hw data\n");

	/* first_word is reused: it now holds the pointer read from 0x37 */
	rc = TCALL(hw, eeprom.ops.read_buffer, first_word, 1, &first_word);
	if (rc != 0)
		return snprintf(page, count,
				"error: reading pointer to the EEPROM\n");

	if (first_word != 0x0000 && first_word != 0xFFFF) {
		rc = TCALL(hw, eeprom.ops.read_buffer, first_word, word_count,
			   eeprom_buff);
		if (rc != 0)
			return snprintf(page, count, "error: reading buffer\n");
	} else {
		memset(eeprom_buff, 0, sizeof(eeprom_buff));
	}

	/* words 0-2 belong to function 0, words 3-5 to function 1 */
	switch (hw->bus.func) {
	case 0:
		return snprintf(page, count, "0x%04X%04X%04X\n",
				eeprom_buff[0],
				eeprom_buff[1],
				eeprom_buff[2]);
	case 1:
		return snprintf(page, count, "0x%04X%04X%04X\n",
				eeprom_buff[3],
				eeprom_buff[4],
				eeprom_buff[5]);
	}
	return snprintf(page, count, "unexpected port %d\n", hw->bus.func);
}
/* /proc read handler: print the net device's current MTU. */
static int txgbe_mtusize(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device *netdev;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	netdev = adapter->netdev;
	if (netdev == NULL)
		return snprintf(page, count, "error: no net device\n");
	return snprintf(page, count, "%d\n", netdev->mtu);
}
/*
 * /proc read handler: print a feature bitmask (bit 0 = Rx checksum
 * offload enabled).
 *
 * Consistency fix: the feature test previously re-dereferenced
 * adapter->netdev->features even though the NULL check was performed on
 * the local 'netdev'; use the validated local pointer instead (same
 * value, but the check and the use now agree).
 */
static int txgbe_featflag(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	int bitmask = 0;
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device *netdev;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	netdev = adapter->netdev;
	if (netdev == NULL)
		return snprintf(page, count, "error: no net device\n");
	if (netdev->features & NETIF_F_RXCSUM)
		bitmask |= 1;
	return snprintf(page, count, "%d\n", bitmask);
}
/* /proc read handler: LSO minimum count; hard-wired to 1 for this device. */
static int txgbe_lsominct(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void __always_unused *data)
{
	return snprintf(page, count, "%d\n", 1);
}
/* /proc read handler: print whether the interface is in promiscuous mode
 * (prints the raw IFF_PROMISC flag value, not normalized to 0/1). */
static int txgbe_prommode(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	struct net_device *netdev;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");
	netdev = adapter->netdev;
	if (netdev == NULL)
		return snprintf(page, count, "error: no net device\n");
	return snprintf(page, count, "%d\n",
			netdev->flags & IFF_PROMISC);
}
/* procfs read handler: descriptor count of the first Tx ring. */
static int txgbe_txdscqsz(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count);
}
/* procfs read handler: descriptor count of the first Rx ring. */
static int txgbe_rxdscqsz(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count);
}
/* procfs read handler: average number of pending descriptors per Rx ring. */
static int txgbe_rxqavg(char *page, char __always_unused **start,
			off_t __always_unused off, int count,
			int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	int backlog = 0;
	int i;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	for (i = 0; i < adapter->num_rx_queues; i++) {
		u16 clean = adapter->rx_ring[i]->next_to_clean;
		u16 use = adapter->rx_ring[i]->next_to_use;

		/* outstanding descriptors, accounting for ring wrap-around */
		if (clean >= use)
			backlog += clean - use;
		else
			backlog += adapter->rx_ring[i]->count - use + clean;
	}

	if (adapter->num_rx_queues <= 0)
		return snprintf(page, count,
				"can't calculate, number of queues %d\n",
				adapter->num_rx_queues);

	return snprintf(page, count, "%d\n", backlog / adapter->num_rx_queues);
}
/* procfs read handler: average number of pending descriptors per Tx ring. */
static int txgbe_txqavg(char *page, char __always_unused **start,
			off_t __always_unused off, int count,
			int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;
	int backlog = 0;
	int i;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	for (i = 0; i < adapter->num_tx_queues; i++) {
		u16 clean = adapter->tx_ring[i]->next_to_clean;
		u16 use = adapter->tx_ring[i]->next_to_use;

		/* outstanding descriptors, accounting for ring wrap-around */
		if (clean >= use)
			backlog += clean - use;
		else
			backlog += adapter->tx_ring[i]->count - use + clean;
	}

	if (adapter->num_tx_queues <= 0)
		return snprintf(page, count,
				"can't calculate, number of queues %d\n",
				adapter->num_tx_queues);

	return snprintf(page, count, "%d\n",
			backlog / adapter->num_tx_queues);
}
/* procfs read handler: I/O virtualization type, fixed at 2. */
static int txgbe_iovotype(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void __always_unused *data)
{
	return snprintf(page, count, "%d\n", 2);
}
/* procfs read handler: number of VFs allocated on this adapter. */
static int txgbe_funcnbr(char *page, char __always_unused **start,
			 off_t __always_unused off, int count,
			 int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	return snprintf(page, count, "%d\n", adapter->num_vfs);
}
/* procfs read handler: PCI bus number of the device. */
static int txgbe_pciebnbr(char *page, char __always_unused **start,
			  off_t __always_unused off, int count,
			  int __always_unused *eof, void *data)
{
	struct txgbe_adapter *adapter = (struct txgbe_adapter *)data;

	if (!adapter)
		return snprintf(page, count, "error: no adapter\n");

	return snprintf(page, count, "%d\n", adapter->pdev->bus->number);
}
static int txgbe_therm_dealarmthresh(char *page, char __always_unused **start,
off_t __always_unused off, int count,
int __always_unused *eof, void *data)
{
struct txgbe_therm_proc_data *therm_data =
(struct txgbe_therm_proc_data *)data;
if (therm_data == NULL)
return snprintf(page, count, "error: no therm_data\n");
return snprintf(page, count, "%d\n",
therm_data->sensor_data->dalarm_thresh);
}
/* procfs read handler: thermal alarm threshold for a sensor. */
static int txgbe_therm_alarmthresh(char *page, char __always_unused **start,
				   off_t __always_unused off, int count,
				   int __always_unused *eof, void *data)
{
	struct txgbe_therm_proc_data *td = data;

	if (!td)
		return snprintf(page, count, "error: no therm_data\n");

	return snprintf(page, count, "%d\n", td->sensor_data->alarm_thresh);
}
/*
 * procfs read handler: current sensor temperature.
 *
 * Triggers a fresh hardware read via txgbe_get_thermal_sensor_data()
 * before reporting.  On failure the error is reported to the reader;
 * previously the error string was written to @page and then immediately
 * overwritten by the (stale) temperature line, so it was never seen.
 */
static int txgbe_therm_temp(char *page, char __always_unused **start,
			    off_t __always_unused off, int count,
			    int __always_unused *eof, void *data)
{
	s32 status;
	struct txgbe_therm_proc_data *therm_data =
		(struct txgbe_therm_proc_data *)data;

	if (therm_data == NULL)
		return snprintf(page, count, "error: no therm_data\n");

	status = txgbe_get_thermal_sensor_data(therm_data->hw);
	if (status != 0)
		/* return immediately so the error is actually reported */
		return snprintf(page, count,
				"error: status %d returned\n", status);

	return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
}
/* Maps a procfs entry name to its legacy read_proc-style handler. */
struct txgbe_proc_type {
	char name[32];
	int (*read)(char*, char**, off_t, int, int*, void*);
};
/* Per-adapter entries created under /proc/driver/txgbe/<pci>/info/.
 * The list is terminated by the entry with a NULL read handler. */
struct txgbe_proc_type txgbe_proc_entries[] = {
	{"fwbanner", &txgbe_fwbanner},
	{"porttype", &txgbe_porttype},
	{"portspeed", &txgbe_portspeed},
	{"wqlflag", &txgbe_wqlflag},
	{"xflowctl", &txgbe_xflowctl},
	{"rxdrops", &txgbe_rxdrops},
	{"rxerrors", &txgbe_rxerrors},
	{"rxupacks", &txgbe_rxupacks},
	{"rxmpacks", &txgbe_rxmpacks},
	{"rxbpacks", &txgbe_rxbpacks},
	{"txdrops", &txgbe_txdrops},
	{"txerrors", &txgbe_txerrors},
	{"txupacks", &txgbe_txupacks},
	{"txmpacks", &txgbe_txmpacks},
	{"txbpacks", &txgbe_txbpacks},
	{"rxframes", &txgbe_rxframes},
	{"rxbytes", &txgbe_rxbytes},
	{"txframes", &txgbe_txframes},
	{"txbytes", &txgbe_txbytes},
	{"linkstat", &txgbe_linkstat},
	{"funcid", &txgbe_funcid},
	{"funcvers", &txgbe_funcvers},
	{"macburn", &txgbe_macburn},
	{"macadmn", &txgbe_macadmn},
	{"maclla1", &txgbe_maclla1},
	{"mtusize", &txgbe_mtusize},
	{"featflag", &txgbe_featflag},
	{"lsominct", &txgbe_lsominct},
	{"prommode", &txgbe_prommode},
	{"txdscqsz", &txgbe_txdscqsz},
	{"rxdscqsz", &txgbe_rxdscqsz},
	{"txqavg", &txgbe_txqavg},
	{"rxqavg", &txgbe_rxqavg},
	{"iovotype", &txgbe_iovotype},
	{"funcnbr", &txgbe_funcnbr},
	{"pciebnbr", &txgbe_pciebnbr},
	{"", NULL}
};
/* Thermal-sensor entries created under the sensor subdirectory;
 * NULL read handler terminates the list. */
struct txgbe_proc_type txgbe_internal_entries[] = {
	{"temp", &txgbe_therm_temp},
	{"alarmthresh", &txgbe_therm_alarmthresh},
	{"dealarmthresh", &txgbe_therm_dealarmthresh},
	{"", NULL}
};
/*
 * Remove every procfs entry created for this adapter, walking from the
 * leaf entries up to the per-device directory.  Safe to call on a
 * partially-built tree (each level is NULL-checked).
 */
void txgbe_del_proc_entries(struct txgbe_adapter *adapter)
{
	int index;
	int i;
	char buf[16]; /* much larger than the sensor number will ever be */
	if (txgbe_top_dir == NULL)
		return;
	/* tear down per-sensor thermal entries first */
	for (i = 0; i < TXGBE_MAX_SENSORS; i++) {
		if (adapter->therm_dir[i] == NULL)
			continue;
		for (index = 0; ; index++) {
			if (txgbe_internal_entries[index].read == NULL)
				break;
			remove_proc_entry(txgbe_internal_entries[index].name,
				adapter->therm_dir[i]);
		}
		snprintf(buf, sizeof(buf), "sensor_%d", i);
		/* NOTE(review): txgbe_procfs_init() creates this directory as
		 * plain "sensor" (no index) and treats therm_dir as a single
		 * pointer, while this loop indexes therm_dir[i] and removes
		 * "sensor_%d" -- confirm against the therm_dir declaration in
		 * the header; one of the two sides looks inconsistent. */
		remove_proc_entry(buf, adapter->info_dir);
	}
	/* then the info entries and the info directory itself */
	if (adapter->info_dir != NULL) {
		for (index = 0; ; index++) {
			if (txgbe_proc_entries[index].read == NULL)
				break;
			remove_proc_entry(txgbe_proc_entries[index].name,
				adapter->info_dir);
		}
		remove_proc_entry("info", adapter->eth_dir);
	}
	/* finally the per-device directory under /proc/driver/txgbe */
	if (adapter->eth_dir != NULL)
		remove_proc_entry(pci_name(adapter->pdev), txgbe_top_dir);
}
/* called from txgbe_main.c */
/* Driver-teardown hook: remove this adapter's procfs tree. */
void txgbe_procfs_exit(struct txgbe_adapter *adapter)
{
	txgbe_del_proc_entries(adapter);
}
/* Create the shared /proc/driver/txgbe directory; -ENOMEM on failure. */
int txgbe_procfs_topdir_init(void)
{
	txgbe_top_dir = proc_mkdir("driver/txgbe", NULL);

	return txgbe_top_dir ? 0 : -ENOMEM;
}
/* Remove the shared /proc/driver/txgbe directory created at module load. */
void txgbe_procfs_topdir_exit(void)
{
	remove_proc_entry("driver/txgbe", NULL);
}
/* called from txgbe_main.c */
/*
 * Build the procfs tree for one adapter:
 *   /proc/driver/txgbe/<pci-id>/info/<entries>
 *   /proc/driver/txgbe/<pci-id>/info/sensor/<thermal entries>
 *
 * The thermal subtree is only created when the hardware supports thermal
 * sensor thresholds.  On any failure the partially-built tree is torn
 * down via txgbe_del_proc_entries() and -ENOMEM is returned.
 *
 * Change vs. previous revision: dropped the unused local 'i'
 * (it was declared but never referenced, causing a compiler warning).
 */
int txgbe_procfs_init(struct txgbe_adapter *adapter)
{
	int rc = 0;
	int index;
	char buf[16]; /* much larger than the sensor number will ever be */
	adapter->eth_dir = NULL;
	adapter->info_dir = NULL;
	adapter->therm_dir = NULL;
	if (txgbe_top_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}
	/* per-device directory named after the PCI address */
	adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), txgbe_top_dir);
	if (adapter->eth_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}
	adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
	if (adapter->info_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}
	/* one read-only file per table entry, all backed by the adapter */
	for (index = 0; ; index++) {
		if (txgbe_proc_entries[index].read == NULL)
			break;
		if (!(create_proc_read_entry(txgbe_proc_entries[index].name,
					   0444,
					   adapter->info_dir,
					   txgbe_proc_entries[index].read,
					   adapter))) {
			rc = -ENOMEM;
			goto fail;
		}
	}
	/* no thermal subtree when thresholds can't be initialized */
	if (!TCALL(&(adapter->hw), ops.init_thermal_sensor_thresh))
		goto exit;
	snprintf(buf, sizeof(buf), "sensor");
	adapter->therm_dir = proc_mkdir(buf, adapter->info_dir);
	if (adapter->therm_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}
	for (index = 0; ; index++) {
		if (txgbe_internal_entries[index].read == NULL)
			break;
		/*
		 * therm_data struct contains pointer the read func
		 * will be needing
		 */
		adapter->therm_data.hw = &adapter->hw;
		adapter->therm_data.sensor_data =
			&adapter->hw.mac.thermal_sensor_data.sensor;
		if (!(create_proc_read_entry(
				   txgbe_internal_entries[index].name,
				   0444,
				   adapter->therm_dir,
				   txgbe_internal_entries[index].read,
				   &adapter->therm_data))) {
			rc = -ENOMEM;
			goto fail;
		}
	}
	goto exit;
fail:
	txgbe_del_proc_entries(adapter);
exit:
	return rc;
}
#endif /* !TXGBE_SYSFS */
#endif /* TXGBE_PROCFS */
/* ================================================================
 * File boundary (extraction artifact removed): the code above ends
 * the procfs support file; the code below begins the PTP support
 * file (txgbe_ptp.c) of the same commit.
 * ================================================================ */
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_ptp.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#include <linux/ptp_classify.h>
/*
* SYSTIME is defined by a fixed point system which allows the user to
* define the scale counter increment value at every level change of
* the oscillator driving SYSTIME value. The time unit is determined by
* the clock frequency of the oscillator and TIMINCA register.
* The cyclecounter and timecounter structures are used to to convert
* the scale counter into nanoseconds. SYSTIME registers need to be converted
* to ns values by use of only a right shift.
* The following math determines the largest incvalue that will fit into
* the available bits in the TIMINCA register:
* Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
* PeriodWidth: Number of bits to store the clock period
* MaxWidth: The maximum width value of the TIMINCA register
* Period: The clock period for the oscillator, which changes based on the link
* speed:
* At 10Gb link or no link, the period is 6.4 ns.
* At 1Gb link, the period is multiplied by 10. (64ns)
* At 100Mb link, the period is multiplied by 100. (640ns)
* round(): discard the fractional portion of the calculation
*
* The calculated value allows us to right shift the SYSTIME register
* value in order to quickly convert it into a nanosecond clock,
* while allowing for the maximum possible adjustment value.
*
* LinkSpeed ClockFreq ClockPeriod TIMINCA:IV
* 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns)
* 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns)
* 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns)
* 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns)
* FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns)
*
* These diagrams are only for the 10Gb link period
*
* +--------------+ +--------------+
* | 32 | | 8 | 3 | 20 |
* *--------------+ +--------------+
* \________ 43 bits ______/ fract
*
* The 43 bit SYSTIME overflows every
* 2^43 * 10^-9 / 3600 = 2.4 hours
*/
#define TXGBE_INCVAL_10GB 0xCCCCCC
#define TXGBE_INCVAL_1GB 0x800000
#define TXGBE_INCVAL_100 0xA00000
#define TXGBE_INCVAL_10 0xC7F380
#define TXGBE_INCVAL_FPGA 0x800000
#define TXGBE_INCVAL_SHIFT_10GB 20
#define TXGBE_INCVAL_SHIFT_1GB 18
#define TXGBE_INCVAL_SHIFT_100 15
#define TXGBE_INCVAL_SHIFT_10 12
#define TXGBE_INCVAL_SHIFT_FPGA 17
#define TXGBE_OVERFLOW_PERIOD (HZ * 30)
#define TXGBE_PTP_TX_TIMEOUT (HZ)
/**
* txgbe_ptp_read - read raw cycle counter (to be used by time counter)
* @hw_cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
static u64 txgbe_ptp_read(const struct cyclecounter *hw_cc)
{
	struct txgbe_adapter *adapter =
		container_of(hw_cc, struct txgbe_adapter, hw_cc);
	struct txgbe_hw *hw = &adapter->hw;
	u64 stamp = 0;
	/* low word read first; presumably the hardware latches the high
	 * word on the SYSTIML read, so keep this order -- TODO confirm
	 * against the datasheet */
	stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIML);
	stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32;
	return stamp;
}
/**
* txgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
* @adapter: private adapter structure
* @hwtstamp: stack timestamp structure
* @systim: unsigned 64bit system time value
*
* We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
* which can be used by the stack's ptp functions.
*
* The lock is used to protect consistency of the cyclecounter and the SYSTIME
* registers. However, it does not need to protect against the Rx or Tx
* timestamp registers, as there can't be a new timestamp until the old one is
* unlatched by reading.
*
* In addition to the timestamp in hardware, some controllers need a software
* overflow cyclecounter, and this function takes this into account as well.
**/
/* Convert a raw SYSTIME/stamp register value into the skb hwtstamp form;
 * tmreg_lock guards the shared timecounter state during the conversion. */
static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter,
					  struct skb_shared_hwtstamps *hwtstamp,
					  u64 timestamp)
{
	u64 ns;
	unsigned long irq_flags;

	memset(hwtstamp, 0, sizeof(*hwtstamp));

	spin_lock_irqsave(&adapter->tmreg_lock, irq_flags);
	ns = timecounter_cyc2time(&adapter->hw_tc, timestamp);
	spin_unlock_irqrestore(&adapter->tmreg_lock, irq_flags);

	hwtstamp->hwtstamp = ns_to_ktime(ns);
}
/**
* txgbe_ptp_adjfreq
* @ptp: the ptp clock structure
* @ppb: parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
*/
#ifndef HAVE_NOT_PTT_ADJFREQ
static int txgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct txgbe_adapter *adapter =
		container_of(ptp, struct txgbe_adapter, ptp_caps);
	struct txgbe_hw *hw = &adapter->hw;
	u64 freq, incval;
	u32 diff;
	int neg_adj = 0;
	/* work with the magnitude; remember the sign for later */
	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	smp_mb();
	incval = READ_ONCE(adapter->base_incval);
	/* scale the base increment by ppb/1e9 to get the correction */
	freq = incval;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);
	incval = neg_adj ? (incval - diff) : (incval + diff);
	/* warn if the adjusted increment no longer fits the IV field */
	if (incval > TXGBE_TSC_1588_INC_IV(~0))
		e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
	wr32(hw, TXGBE_TSC_1588_INC,
		TXGBE_TSC_1588_INC_IVP(incval, 2));
	return 0;
}
#endif
/**
* txgbe_ptp_adjtime
* @ptp: the ptp clock structure
* @delta: offset to adjust the cycle counter by ns
*
* adjust the timer by resetting the timecounter structure.
*/
/* Shift the PTP clock by @delta ns via the software timecounter;
 * tmreg_lock serializes against concurrent timecounter users. */
static int txgbe_ptp_adjtime(struct ptp_clock_info *ptp,
			     s64 delta)
{
	unsigned long irq_flags;
	struct txgbe_adapter *adapter =
		container_of(ptp, struct txgbe_adapter, ptp_caps);

	spin_lock_irqsave(&adapter->tmreg_lock, irq_flags);
	timecounter_adjtime(&adapter->hw_tc, delta);
	spin_unlock_irqrestore(&adapter->tmreg_lock, irq_flags);

	return 0;
}
/**
* txgbe_ptp_gettime64
* @ptp: the ptp clock structure
* @ts: timespec64 structure to hold the current time value
*
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec64.
*/
/* Read the current PTP time (ns) from the timecounter and return it as
 * a timespec64; tmreg_lock protects the shared timecounter state. */
static int txgbe_ptp_gettime64(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	u64 ns;
	unsigned long irq_flags;
	struct txgbe_adapter *adapter =
		container_of(ptp, struct txgbe_adapter, ptp_caps);

	spin_lock_irqsave(&adapter->tmreg_lock, irq_flags);
	ns = timecounter_read(&adapter->hw_tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, irq_flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
* txgbe_ptp_settime64
* @ptp: the ptp clock structure
* @ts: the timespec64 containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
*/
/* Set the PTP clock to @ts by re-initializing the timecounter with the
 * requested ns value as its new base. */
static int txgbe_ptp_settime64(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	unsigned long irq_flags;
	struct txgbe_adapter *adapter =
		container_of(ptp, struct txgbe_adapter, ptp_caps);
	u64 ns = timespec64_to_ns(ts);

	/* restart the timecounter from the new epoch */
	spin_lock_irqsave(&adapter->tmreg_lock, irq_flags);
	timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns);
	spin_unlock_irqrestore(&adapter->tmreg_lock, irq_flags);

	return 0;
}
#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
static int txgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct timespec64 ts64;
int err;
err = txgbe_ptp_gettime64(ptp, &ts64);
if (err)
return err;
*ts = timespec64_to_timespec(ts64);
return 0;
}
static int txgbe_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct timespec64 ts64;
ts64 = timespec_to_timespec64(*ts);
return txgbe_ptp_settime64(ptp, &ts64);
}
#endif
/**
* txgbe_ptp_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
/* No ancillary PHC features (PPS, extts, perout) are supported. */
static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
				    struct ptp_clock_request *rq, int on)
{
	return -ENOTSUPP;
}
/**
* txgbe_ptp_check_pps_event
* @adapter: the private adapter structure
* @eicr: the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
*/
void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter)
{
	/* 'event' is prepared but intentionally never delivered -- PPS on
	 * SDP is not configured yet (see comment below) */
	struct ptp_clock_event event;
	event.type = PTP_CLOCK_PPS;
	/* this check is necessary in case the interrupt was enabled via some
	 * alternative means (ex. debug_fs). Better to check here than
	 * everywhere that calls this function.
	 */
	if (!adapter->ptp_clock)
		return;
	/* we don't config PPS on SDP yet, so just return.
	 * ptp_clock_event(adapter->ptp_clock, &event);
	 */
}
/**
* txgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
* @adapter: private adapter struct
*
* this watchdog task periodically reads the timecounter
* in order to prevent missing when the system time registers wrap
* around. This needs to be run approximately twice a minute for the fastest
* overflowing hardware. We run it for all hardware since it shouldn't have a
* large impact.
*/
void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter)
{
	bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
					     TXGBE_OVERFLOW_PERIOD);
	struct timespec64 ts;
	if (timeout) {
		/* the result in 'ts' is discarded; the read itself updates
		 * the timecounter so wrap-around is not missed */
		txgbe_ptp_gettime64(&adapter->ptp_caps, &ts);
		adapter->last_overflow_check = jiffies;
	}
}
/**
* txgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
* @adapter: private network adapter structure
*
* this watchdog task is scheduled to detect error case where hardware has
* dropped an Rx packet that was timestamped when the ring is full. The
* particular error is rare but leaves the device in a state unable to timestamp
* any future packets.
*/
void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	struct txgbe_ring *rx_ring;
	u32 tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL);
	unsigned long rx_event;
	int n;
	/* if we don't have a valid timestamp in the registers, just update the
	 * timeout counter and exit
	 */
	if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID)) {
		adapter->last_rx_ptp_check = jiffies;
		return;
	}
	/* determine the most recent watchdog or rx_timestamp event */
	rx_event = adapter->last_rx_ptp_check;
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		if (time_after(rx_ring->last_rx_timestamp, rx_event))
			rx_event = rx_ring->last_rx_timestamp;
	}
	/* only need to read the high RXSTMP register to clear the lock */
	/* 5 seconds with no timestamp delivery => assume the latch is
	 * stuck; the read unlatches it so future packets can stamp */
	if (time_is_before_jiffies(rx_event + 5*HZ)) {
		rd32(hw, TXGBE_PSR_1588_STMPH);
		adapter->last_rx_ptp_check = jiffies;
		adapter->rx_hwtstamp_cleared++;
		e_warn(drv, "clearing RX Timestamp hang");
	}
}
/**
* txgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
* @adapter: the private adapter structure
*
* This function should be called whenever the state related to a Tx timestamp
* needs to be cleared. This helps ensure that all related bits are reset for
* the next Tx timestamp event.
*/
static void txgbe_ptp_clear_tx_timestamp(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	/* read of STMPH clears any latched Tx timestamp in hardware */
	rd32(hw, TXGBE_TSC_1588_STMPH);
	if (adapter->ptp_tx_skb) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
	}
	/* allow the next Tx timestamp request to proceed */
	clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
* txgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: the private adapter struct
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
static void txgbe_ptp_tx_hwtstamp(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0;
	/* assemble the 64-bit stamp from the low/high register pair */
	regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPL);
	regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPH) << 32;
	txgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval);
	/* deliver the timestamp to the stack, then release skb/state */
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	txgbe_ptp_clear_tx_timestamp(adapter);
}
/**
* txgbe_ptp_tx_hwtstamp_work
* @work: pointer to the work struct
*
* This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
* timestamp has been taken for the current skb. It is necesary, because the
* descriptor's "done" bit does not correlate with the timestamp event.
*/
static void txgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
{
	struct txgbe_adapter *adapter = container_of(work, struct txgbe_adapter,
						     ptp_tx_work);
	struct txgbe_hw *hw = &adapter->hw;
	bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
					      TXGBE_PTP_TX_TIMEOUT);
	u32 tsynctxctl;
	/* we have to have a valid skb to poll for a timestamp */
	if (!adapter->ptp_tx_skb) {
		txgbe_ptp_clear_tx_timestamp(adapter);
		return;
	}
	/* stop polling once we have a valid timestamp */
	tsynctxctl = rd32(hw, TXGBE_TSC_1588_CTL);
	if (tsynctxctl & TXGBE_TSC_1588_CTL_VALID) {
		txgbe_ptp_tx_hwtstamp(adapter);
		return;
	}
	/* check timeout last in case timestamp event just occurred */
	if (timeout) {
		txgbe_ptp_clear_tx_timestamp(adapter);
		adapter->tx_hwtstamp_timeouts++;
		e_warn(drv, "clearing Tx Timestamp hang");
	} else {
		/* reschedule to keep checking until we timeout */
		schedule_work(&adapter->ptp_tx_work);
	}
}
/**
* txgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb)
{
	struct txgbe_hw *hw = &adapter->hw;
	u64 regval = 0;
	u32 tsyncrxctl;
	/*
	 * Read the tsyncrxctl register afterwards in order to prevent taking an
	 * I/O hit on every packet.
	 */
	tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL);
	if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID))
		return;
	/* low word first, then high; the STMPH read unlatches the stamp */
	regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPL);
	regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPH) << 32;
	txgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
/**
* txgbe_ptp_get_ts_config - get current hardware timestamping configuration
* @adapter: pointer to adapter structure
* @ifreq: ioctl data
*
* This function returns the current timestamping settings. Rather than
* attempt to deconstruct registers to fill in the values, simply keep a copy
* of the old settings around, and return a copy when requested.
*/
/* SIOCGHWTSTAMP: copy the cached hwtstamp configuration to userspace. */
int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr)
{
	struct hwtstamp_config *config = &adapter->tstamp_config;

	if (copy_to_user(ifr->ifr_data, config, sizeof(*config)))
		return -EFAULT;

	return 0;
}
/**
* txgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
* @adapter: the private txgbe adapter structure
* @config: the hwtstamp configuration requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
*
* Incoming time stamping has to be configured via the hardware
* filters. Not all combinations are supported, in particular event
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
*
* Since hardware always timestamps Path delay packets when timestamping V2
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
*
* Note: this may modify the hwtstamp configuration towards a more general
* mode, if required to support the specifically requested mode.
*/
static int txgbe_ptp_set_timestamp_mode(struct txgbe_adapter *adapter,
					struct hwtstamp_config *config)
{
	struct txgbe_hw *hw = &adapter->hw;
	u32 tsync_tx_ctl = TXGBE_TSC_1588_CTL_ENABLED;
	u32 tsync_rx_ctl = TXGBE_PSR_1588_CTL_ENABLED;
	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
	bool is_l2 = false;
	u32 regval;
	/* reserved for future extensions */
	if (config->flags)
		return -EINVAL;
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* was an implicit fallthrough into HWTSTAMP_TX_ON;
		 * harmless (next case only breaks) but fixed with an
		 * explicit break to satisfy -Wimplicit-fallthrough */
		break;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		tsync_rx_mtrl = 0;
		adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				    TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1;
		tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG;
		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1;
		tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG;
		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* hardware timestamps all V2 event packets regardless of
		 * sub-type, so report the more general V2 EVENT mode back */
		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_EVENT_V2;
		is_l2 = true;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
	default:
		/* register RXMTRL must be set in order to do V1 packets,
		 * therefore it is not possible to time stamp both V1 Sync and
		 * Delay_Req messages unless hardware supports timestamping all
		 * packets => return error
		 */
		adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				    TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}
	/* define ethertype filter for timestamping L2 packets */
	if (is_l2)
		wr32(hw,
		     TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588),
		     (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
		      TXGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */
		      ETH_P_1588)); /* 1588 eth protocol type */
	else
		wr32(hw,
		     TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588),
		     0);
	/* enable/disable TX */
	regval = rd32(hw, TXGBE_TSC_1588_CTL);
	regval &= ~TXGBE_TSC_1588_CTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(hw, TXGBE_TSC_1588_CTL, regval);
	/* enable/disable RX */
	regval = rd32(hw, TXGBE_PSR_1588_CTL);
	regval &= ~(TXGBE_PSR_1588_CTL_ENABLED | TXGBE_PSR_1588_CTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(hw, TXGBE_PSR_1588_CTL, regval);
	/* define which PTP packets are time stamped */
	wr32(hw, TXGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl);
	TXGBE_WRITE_FLUSH(hw);
	/* clear TX/RX timestamp state, just to be sure */
	txgbe_ptp_clear_tx_timestamp(adapter);
	rd32(hw, TXGBE_PSR_1588_STMPH);
	return 0;
}
/**
* txgbe_ptp_set_ts_config - user entry point for timestamp mode
* @adapter: pointer to adapter struct
* @ifreq: ioctl data
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
*/
int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr)
{
struct hwtstamp_config config;
int err;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = txgbe_ptp_set_timestamp_mode(adapter, &config);
if (err)
return err;
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
/* Select the TIMINCA shift/increment pair matching the current link speed.
 * With no link the internal DMA clock runs at the 10Gb rate, so the 10Gb
 * values double as the default. */
static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter,
					u32 *shift, u32 *incval)
{
	/**
	 * Scale the NIC cycle counter by a large factor so that
	 * relatively small corrections to the frequency can be added
	 * or subtracted. The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 */
	switch (adapter->link_speed) {
	case TXGBE_LINK_SPEED_10_FULL:
		*shift = TXGBE_INCVAL_SHIFT_10;
		*incval = TXGBE_INCVAL_10;
		break;
	case TXGBE_LINK_SPEED_100_FULL:
		*shift = TXGBE_INCVAL_SHIFT_100;
		*incval = TXGBE_INCVAL_100;
		break;
	case TXGBE_LINK_SPEED_1GB_FULL:
		*shift = TXGBE_INCVAL_SHIFT_1GB;
		*incval = TXGBE_INCVAL_1GB;
		break;
	case TXGBE_LINK_SPEED_10GB_FULL:
	default: /* no link / 10Gb share the same clock period */
		*shift = TXGBE_INCVAL_SHIFT_10GB;
		*incval = TXGBE_INCVAL_10GB;
		break;
	}
}
/**
* txgbe_ptp_start_cyclecounter - create the cycle counter from hw
* @adapter: pointer to the adapter structure
*
* This function should be called to set the proper values for the TIMINCA
* register and tell the cyclecounter structure what the tick rate of SYSTIME
* is. It does not directly modify SYSTIME registers or the timecounter
* structure. It should be called whenever a new TIMINCA value is necessary,
* such as during initialization or when the link speed changes.
*/
void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	unsigned long flags;
	struct cyclecounter cc;
	u32 incval = 0;
	/* For some of the boards below this mask is technically incorrect.
	 * The timestamp mask overflows at approximately 61bits. However the
	 * particular hardware does not overflow on an even bitmask value.
	 * Instead, it overflows due to conversion of upper 32bits billions of
	 * cycles. Timecounters are not really intended for this purpose so
	 * they do not properly function if the overflow point isn't 2^N-1.
	 * However, the actual SYSTIME values in question take ~138 years to
	 * overflow. In practice this means they won't actually overflow. A
	 * proper fix to this problem would require modification of the
	 * timecounter delta calculations.
	 */
	cc.mask = CLOCKSOURCE_MASK(64);
	cc.mult = 1;
	cc.shift = 0;
	cc.read = txgbe_ptp_read;
	/* shift/incval depend on the current link speed */
	txgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval);
	wr32(hw, TXGBE_TSC_1588_INC,
	     TXGBE_TSC_1588_INC_IVP(incval, 2));
	/* update the base incval used to calculate frequency adjustment */
	WRITE_ONCE(adapter->base_incval, incval);
	smp_mb();
	/* need lock to prevent incorrect read while modifying cyclecounter */
	memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc));
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
/**
* txgbe_ptp_reset
* @adapter: the txgbe private board structure
*
* When the MAC resets, all of the hardware configuration for timesync is
* reset. This function should be called to re-enable the device for PTP,
* using the last known settings. However, we do lose the current clock time,
* so we fallback to resetting it based on the kernel's realtime clock.
*
* This function will maintain the hwtstamp_config settings, and it retriggers
* the SDP output if it's enabled.
*/
void txgbe_ptp_reset(struct txgbe_adapter *adapter)
{
unsigned long flags;
/* reset the hardware timestamping mode */
txgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
/* re-program the cyclecounter for the current link speed */
txgbe_ptp_start_cyclecounter(adapter);
/* re-seed the timecounter from the kernel's realtime clock; the lock
 * protects hw_tc/hw_cc from concurrent readers while we reinitialize
 */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
/* restart the periodic overflow-check bookkeeping from "now" */
adapter->last_overflow_check = jiffies;
}
/**
 * txgbe_ptp_create_clock
 * @adapter: the txgbe private adapter structure
 *
 * This function performs setup of the user entry point function table and
 * initializes the PTP clock device used by userspace to access the
 * clock-like features of the PTP core. It will be called by txgbe_ptp_init,
 * and may re-use a previously initialized clock (such as during a
 * suspend/resume cycle).
 *
 * Returns 0 on success (or if a clock already exists), otherwise the
 * negative error code from ptp_clock_register().
 */
static long txgbe_ptp_create_clock(struct txgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	long err;

	/* do nothing if we already have a clock device */
	if (!IS_ERR_OR_NULL(adapter->ptp_clock))
		return 0;

	snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name),
		 "%s", netdev->name);
	adapter->ptp_caps.owner = THIS_MODULE;
	adapter->ptp_caps.max_adj = 250000000; /* 10^-9s */
	adapter->ptp_caps.n_alarm = 0;
	adapter->ptp_caps.n_ext_ts = 0;
	adapter->ptp_caps.n_per_out = 0;
	adapter->ptp_caps.pps = 0;
#ifndef HAVE_NOT_PTT_ADJFREQ
	adapter->ptp_caps.adjfreq = txgbe_ptp_adjfreq;
#endif
	adapter->ptp_caps.adjtime = txgbe_ptp_adjtime;
#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
	adapter->ptp_caps.gettime64 = txgbe_ptp_gettime64;
	adapter->ptp_caps.settime64 = txgbe_ptp_settime64;
#else
	adapter->ptp_caps.gettime = txgbe_ptp_gettime;
	adapter->ptp_caps.settime = txgbe_ptp_settime;
#endif
	adapter->ptp_caps.enable = txgbe_ptp_feature_enable;

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(adapter->ptp_clock)) {
		err = PTR_ERR(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_dev_err("ptp_clock_register failed\n");
		return err;
	} else {
		/* braces on both branches per kernel coding style */
		e_dev_info("registered PHC device on %s\n", netdev->name);
	}

	/* Set the default timestamp mode to disabled here. We do this in
	 * create_clock instead of initialization, because we don't want to
	 * override the previous settings during a suspend/resume cycle.
	 */
	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

	return 0;
}
/**
* txgbe_ptp_init
* @adapter: the txgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
* cyclecounter init routine and exits.
*/
void txgbe_ptp_init(struct txgbe_adapter *adapter)
{
	/* initialize the spin lock first, since the user might call the clock
	 * functions any time after we've initialized the ptp clock device.
	 */
	spin_lock_init(&adapter->tmreg_lock);

	/* obtain a ptp clock device, or re-use an existing device */
	if (txgbe_ptp_create_clock(adapter))
		return;

	/* we have a clock, so we can initialize work for timestamps now */
	INIT_WORK(&adapter->ptp_tx_work, txgbe_ptp_tx_hwtstamp_work);

	/* reset the ptp related hardware bits */
	txgbe_ptp_reset(adapter);

	/* enter the TXGBE_PTP_RUNNING state (redundant trailing return
	 * removed)
	 */
	set_bit(__TXGBE_PTP_RUNNING, &adapter->state);
}
/**
* txgbe_ptp_suspend - stop ptp work items
* @adapter: pointer to adapter struct
*
* This function suspends ptp activity, and prevents more work from being
* generated, but does not destroy the clock device.
*/
void txgbe_ptp_suspend(struct txgbe_adapter *adapter)
{
/* leave the TXGBE_PTP_RUNNING STATE; if PTP was not running there is
 * nothing to tear down
 */
if (!test_and_clear_bit(__TXGBE_PTP_RUNNING, &adapter->state))
return;
/* clear the PPS-enabled flag so the feature is off while suspended */
adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED;
/* flush any in-flight TX timestamp work and drop the pending state */
cancel_work_sync(&adapter->ptp_tx_work);
txgbe_ptp_clear_tx_timestamp(adapter);
}
/**
* txgbe_ptp_stop - destroy the ptp_clock device
* @adapter: pointer to adapter struct
*
* Completely destroy the ptp_clock device, and disable all PTP related
* features. Intended to be run when the device is being closed.
*/
void txgbe_ptp_stop(struct txgbe_adapter *adapter)
{
/* first, suspend ptp activity */
txgbe_ptp_suspend(adapter);
/* now destroy the ptp clock device */
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
/* NULL the pointer so a later txgbe_ptp_create_clock() registers
 * a fresh device instead of re-using this stale one
 */
adapter->ptp_clock = NULL;
e_dev_info("removed PHC on %s\n",
adapter->netdev->name);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,77 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#ifndef _TXGBE_SRIOV_H_
#define _TXGBE_SRIOV_H_
/* txgbe driver limit the max number of VFs could be enabled to
 * 63 (TXGBE_MAX_VF_FUNCTIONS - 1)
 */
#define TXGBE_MAX_VFS_DRV_LIMIT (TXGBE_MAX_VF_FUNCTIONS - 1)
/* PF-side VF management helpers */
void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter);
int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf);
void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe);
void txgbe_msg_task(struct txgbe_adapter *adapter);
int txgbe_set_vf_mac(struct txgbe_adapter *adapter,
u16 vf, unsigned char *mac_addr);
void txgbe_disable_tx_rx(struct txgbe_adapter *adapter);
void txgbe_ping_all_vfs(struct txgbe_adapter *adapter);
void txgbe_set_all_vfs(struct txgbe_adapter *adapter);
#ifdef IFLA_VF_MAX
/* ndo callbacks backing "ip link set ... vf N ..."; the alternative
 * prototypes below track kernel API differences via feature macros
 */
int txgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
#ifdef IFLA_VF_VLAN_INFO_MAX
int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos, __be16 vlan_proto);
#else
int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
#endif
#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
#else
int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
#endif
#ifdef HAVE_NDO_SET_VF_LINK_STATE
int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
#endif
int txgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
#endif /* IFLA_VF_MAX */
/* SR-IOV enable/disable entry points */
int txgbe_disable_sriov(struct txgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void txgbe_enable_sriov(struct txgbe_adapter *adapter);
#endif
int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state);
/*
 * These are defined in txgbe_type.h on behalf of the VF driver
 * but we need them here unwrapped for the PF driver.
 */
#define TXGBE_DEV_ID_SP_VF 0x1000
#endif /* _TXGBE_SRIOV_H_ */

View File

@ -0,0 +1,229 @@
/*
* WangXun 10 Gigabit PCI Express Linux driver
* Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* based on ixgbe_sysfs.c, Copyright(c) 1999 - 2017 Intel Corporation.
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "txgbe.h"
#include "txgbe_hw.h"
#include "txgbe_type.h"
#ifdef TXGBE_SYSFS
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/time.h>
#ifdef TXGBE_HWMON
#include <linux/hwmon.h>
#endif
#ifdef TXGBE_HWMON
/* hwmon callback functions */
/* sysfs show handler: report the current thermal sensor reading in
 * millidegrees Celsius, refreshing the cached hardware value first.
 */
static ssize_t txgbe_hwmon_show_temp(struct device __always_unused *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct hwmon_attr *txgbe_attr;
	unsigned int millidegrees;

	txgbe_attr = container_of(attr, struct hwmon_attr, dev_attr);

	/* re-read the temp field from hardware before reporting it */
	TCALL(txgbe_attr->hw, mac.ops.get_thermal_sensor_data);

	/* sensor reports whole degrees; hwmon expects millidegrees */
	millidegrees = txgbe_attr->sensor->temp * 1000;

	return sprintf(buf, "%u\n", millidegrees);
}
/* sysfs show handler: report the cached alarm threshold in millidegrees. */
static ssize_t txgbe_hwmon_show_alarmthresh(struct device __always_unused *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct hwmon_attr *txgbe_attr;
	unsigned int millidegrees;

	txgbe_attr = container_of(attr, struct hwmon_attr, dev_attr);

	/* threshold is in whole degrees; hwmon expects millidegrees */
	millidegrees = txgbe_attr->sensor->alarm_thresh * 1000;

	return sprintf(buf, "%u\n", millidegrees);
}
/* sysfs show handler: report the cached danger-alarm threshold in
 * millidegrees.
 */
static ssize_t txgbe_hwmon_show_dalarmthresh(struct device __always_unused *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct hwmon_attr *txgbe_attr;
	unsigned int millidegrees;

	txgbe_attr = container_of(attr, struct hwmon_attr, dev_attr);

	/* threshold is in whole degrees; hwmon expects millidegrees */
	millidegrees = txgbe_attr->sensor->dalarm_thresh * 1000;

	return sprintf(buf, "%u\n", millidegrees);
}
/**
 * txgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
 * @adapter: pointer to the adapter structure
 * @type: type of sensor data to display
 *
 * For each file we want in hwmon's sysfs interface we need a
 * device_attribute. This is included in our hwmon_attr struct that
 * contains the references to the data structures we need to get the
 * data to display.
 *
 * Returns 0 on success, -EPERM for an unknown @type, or the error from
 * device_create_file().
 */
static int txgbe_add_hwmon_attr(struct txgbe_adapter *adapter, int type)
{
	struct hwmon_attr *entry;
	unsigned int idx;
	int rc;

	/* next free slot in the pre-allocated attribute list */
	idx = adapter->txgbe_hwmon_buff.n_hwmon;
	entry = &adapter->txgbe_hwmon_buff.hwmon_list[idx];

	switch (type) {
	case TXGBE_HWMON_TYPE_TEMP:
		entry->dev_attr.show = txgbe_hwmon_show_temp;
		snprintf(entry->name, sizeof(entry->name),
			 "temp%u_input", 0);
		break;
	case TXGBE_HWMON_TYPE_ALARMTHRESH:
		entry->dev_attr.show = txgbe_hwmon_show_alarmthresh;
		snprintf(entry->name, sizeof(entry->name),
			 "temp%u_alarmthresh", 0);
		break;
	case TXGBE_HWMON_TYPE_DALARMTHRESH:
		entry->dev_attr.show = txgbe_hwmon_show_dalarmthresh;
		snprintf(entry->name, sizeof(entry->name),
			 "temp%u_dalarmthresh", 0);
		break;
	default:
		return -EPERM;
	}

	/* these fields are the same regardless of sensor type */
	entry->sensor = &adapter->hw.mac.thermal_sensor_data.sensor;
	entry->hw = &adapter->hw;
	entry->dev_attr.store = NULL;
	entry->dev_attr.attr.mode = S_IRUGO;
	entry->dev_attr.attr.name = entry->name;

	rc = device_create_file(pci_dev_to_dev(adapter->pdev),
				&entry->dev_attr);
	/* only claim the slot once the sysfs file actually exists */
	if (rc == 0)
		++adapter->txgbe_hwmon_buff.n_hwmon;

	return rc;
}
#endif /* TXGBE_HWMON */
/* Remove every hwmon sysfs file created for @adapter, free the attribute
 * list, and unregister the hwmon device. Safe to call with NULL or with a
 * partially initialized adapter (used on the init error path).
 */
static void txgbe_sysfs_del_adapter(
	struct txgbe_adapter __maybe_unused *adapter)
{
#ifdef TXGBE_HWMON
	struct hwmon_buff *hwmon;
	int i;

	if (!adapter)
		return;

	hwmon = &adapter->txgbe_hwmon_buff;

	for (i = 0; i < hwmon->n_hwmon; i++)
		device_remove_file(pci_dev_to_dev(adapter->pdev),
				   &hwmon->hwmon_list[i].dev_attr);

	/* kfree(NULL) is a no-op, so an unallocated list is fine */
	kfree(hwmon->hwmon_list);

	if (hwmon->device)
		hwmon_device_unregister(hwmon->device);
#endif /* TXGBE_HWMON */
}
/* called from txgbe_main.c: tear down all sysfs/hwmon state created by
 * txgbe_sysfs_init() for this adapter
 */
void txgbe_sysfs_exit(struct txgbe_adapter *adapter)
{
txgbe_sysfs_del_adapter(adapter);
}
/* called from txgbe_main.c */
/**
 * txgbe_sysfs_init - create hwmon sysfs files for the adapter's sensors
 * @adapter: pointer to the adapter structure
 *
 * Returns 0 on success (including when no thermal sensors are present),
 * otherwise a negative errno. On failure, any partially created state is
 * torn down via txgbe_sysfs_del_adapter().
 */
int txgbe_sysfs_init(struct txgbe_adapter *adapter)
{
	int rc = 0;
#ifdef TXGBE_HWMON
	struct hwmon_buff *txgbe_hwmon = &adapter->txgbe_hwmon_buff;
	int n_attrs;
#endif /* TXGBE_HWMON */

	if (adapter == NULL) {
		/* was silently returning 0 here, reporting success for a
		 * NULL adapter; return a real error instead
		 */
		rc = -EINVAL;
		goto err;
	}

#ifdef TXGBE_HWMON
	/* Don't create thermal hwmon interface if no sensors present */
	if (TCALL(&adapter->hw, mac.ops.init_thermal_sensor_thresh))
		goto no_thermal;

	/*
	 * Allocation space for max attributes
	 * max num sensors * values (temp, alarmthresh, dalarmthresh)
	 */
	n_attrs = 3;
	txgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
					  GFP_KERNEL);
	if (!txgbe_hwmon->hwmon_list) {
		rc = -ENOMEM;
		goto err;
	}

	txgbe_hwmon->device =
		hwmon_device_register(pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(txgbe_hwmon->device)) {
		rc = PTR_ERR(txgbe_hwmon->device);
		goto err;
	}

	/* Bail if any hwmon attr struct fails to initialize. Check each
	 * call individually so a real errno is returned instead of the
	 * bitwise OR of several error codes.
	 */
	rc = txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_TEMP);
	if (rc)
		goto err;
	rc = txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_ALARMTHRESH);
	if (rc)
		goto err;
	rc = txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_DALARMTHRESH);
	if (rc)
		goto err;

no_thermal:
#endif /* TXGBE_HWMON */
	goto exit;

err:
	txgbe_sysfs_del_adapter(adapter);
exit:
	return rc;
}
#endif /* TXGBE_SYSFS */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2022 Intel Corporation. */
#ifndef _TXGBE_TXRX_COMMON_H_
#define _TXGBE_TXRX_COMMON_H_
/* Tx/Rx declarations shared between the regular datapath and the
 * XDP / AF_XDP zero-copy datapath.
 */
#ifndef TXGBE_TXD_CMD
/* default Tx descriptor command bits: end-of-packet + report-status */
#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \
TXGBE_TXD_RS)
#endif
/* XDP verdict flags (bitmask) */
#define TXGBE_XDP_PASS 0
#define TXGBE_XDP_CONSUMED BIT(0)
#define TXGBE_XDP_TX BIT(1)
#define TXGBE_XDP_REDIR BIT(2)
#ifdef HAVE_XDP_SUPPORT
#ifdef HAVE_XDP_FRAME_STRUCT
int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf);
#else
int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_buff *xdp);
#endif
#ifdef HAVE_AF_XDP_ZC_SUPPORT
/* AF_XDP zero-copy (XSK) support; the umem/pool argument type differs
 * with kernel version, hence the HAVE_NETDEV_BPF_XSK_POOL split below
 */
void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring);
void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring);
#ifndef HAVE_NETDEV_BPF_XSK_POOL
struct xdp_umem *txgbe_xsk_umem(struct txgbe_adapter *adapter,
struct txgbe_ring *ring);
int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xdp_umem *umem,
u16 qid);
#else
struct xsk_buff_pool *txgbe_xsk_umem(struct txgbe_adapter *adapter,
struct txgbe_ring *ring);
int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xsk_buff_pool *umem,
u16 qid);
#endif /* HAVE_NETDEV_BPF_XSK_POOL */
#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL
void txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 cleaned_count);
void txgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
#else
bool txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 cleaned_count);
#endif
int txgbe_clean_rx_irq_zc(struct txgbe_q_vector *q_vector,
struct txgbe_ring *rx_ring,
const int budget);
void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring);
bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector,
struct txgbe_ring *tx_ring);
#ifdef HAVE_NDO_XSK_WAKEUP
int txgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
#else
int txgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
#endif
void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring);
bool txgbe_xsk_any_rx_ring_enabled(struct txgbe_adapter *adapter);
#endif /* HAVE_AF_XDP_ZC_SUPPORT */
#endif /* HAVE_XDP_SUPPORT */
/* Rx completion helpers shared with the zero-copy path */
bool txgbe_cleanup_headers(struct txgbe_ring __maybe_unused *rx_ring,
union txgbe_rx_desc *rx_desc,
struct sk_buff *skb);
void txgbe_process_skb_fields(struct txgbe_ring *rx_ring,
union txgbe_rx_desc *rx_desc,
struct sk_buff *skb);
void txgbe_rx_skb(struct txgbe_q_vector *q_vector,
struct txgbe_ring *rx_ring,
union txgbe_rx_desc *rx_desc,
struct sk_buff *skb);
#endif /* _TXGBE_TXRX_COMMON_H_ */