!146 [next-6.6]kunpeng:Backport hns3 features and bugfixes

Merge pull request !146 from hongrongxuan/linux-6.6/next-ronson-dev
This commit is contained in:
chinaljp030 2024-05-08 06:43:35 +00:00 committed by Gitee
commit ffca8a908f
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
42 changed files with 5618 additions and 182 deletions

View File

@ -9531,6 +9531,11 @@ S: Maintained
F: Documentation/ABI/testing/debugfs-hisi-zip
F: drivers/crypto/hisilicon/zip/
HISILICON HNS3 PTP SYNC DRIVER
M: Yonglong Liu <liuyonglong@huawei.com>
S: Supported
F: drivers/ptp/ptp_hisi.c
HMM - Heterogeneous Memory Management
M: Jérôme Glisse <jglisse@redhat.com>
L: linux-mm@kvack.org

View File

@ -899,6 +899,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_XGENE_SLIMPRO=m
CONFIG_I2C_SLAVE=y
CONFIG_SPI=y
CONFIG_PTP_HISI=m
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
CONFIG_POWER_RESET_HISI=y

View File

@ -92,6 +92,7 @@ config HNS3
depends on PCI
select NET_DEVLINK
select PAGE_POOL
select PAGE_POOL_STATS
help
This selects the framework support for Hisilicon Network Subsystem 3.
This layer facilitates clients like ENET, RoCE and user-space ethernet

View File

@ -12,6 +12,7 @@ obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-objs += hns3_ext.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
@ -24,6 +25,6 @@ obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
hclge-objs += hns3pf/hclge_ext.o
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o

View File

@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */
HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */
HCLGE_MBX_SET_QB = 0x28, /* (VF -> PF) set queue bonding */
HCLGE_MBX_PUSH_QB_STATE, /* (PF -> VF) push qb state */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@ -77,6 +79,12 @@ enum hclge_mbx_tbl_cfg_subcode {
HCLGE_MBX_VPORT_LIST_CLEAR,
};
enum hclge_mbx_qb_cfg_subcode {
HCLGE_MBX_QB_CHECK_CAPS = 0, /* query whether support qb */
HCLGE_MBX_QB_ENABLE, /* request pf enable qb */
HCLGE_MBX_QB_GET_STATE /* query whether qb enabled */
};
#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4

View File

@ -43,6 +43,7 @@
#define HNAE3_DEVICE_VERSION_V1 0x00020
#define HNAE3_DEVICE_VERSION_V2 0x00021
#define HNAE3_DEVICE_VERSION_V3 0x00030
#define HNAE3_DEVICE_VERSION_V4 0x00032
#define HNAE3_PCI_REVISION_BIT_SIZE 8
@ -103,6 +104,9 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_LANE_NUM_B,
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
HNAE3_DEV_SUPPORT_VF_FAULT_B,
HNAE3_DEV_SUPPORT_NOTIFY_PKT_B,
HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@ -177,6 +181,15 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_tm_flush_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
#define hnae3_ae_dev_notify_pkt_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_NOTIFY_PKT_B, (ae_dev)->caps)
#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps)
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@ -271,6 +284,7 @@ enum hnae3_reset_type {
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_NONE_RESET,
HNAE3_VF_EXP_RESET,
HNAE3_MAX_RESET,
};
@ -357,6 +371,15 @@ struct hnae3_vector_info {
#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24
#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24)
#define HNAE3_SCC_VERSION_BYTE2_SHIFT 16
#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16)
#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8
#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8)
#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0
#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0)
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@ -383,6 +406,7 @@ struct hnae3_dev_specs {
u16 mc_mac_size;
u32 mac_stats_num;
u8 tnl_num;
u8 hilink_version;
};
struct hnae3_client_ops {
@ -563,6 +587,10 @@ struct hnae3_ae_dev {
* Check if any cls flower rule exist
* dbg_read_cmd
* Execute debugfs read command.
* request_flush_qb_config
* Request to update queue bonding configuration
* query_fd_qb_state
* Query whether hw queue bonding enabled
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@ -762,6 +790,8 @@ struct hnae3_ae_ops {
struct ethtool_link_ksettings *cmd);
int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
const struct ethtool_link_ksettings *cmd);
void (*request_flush_qb_config)(struct hnae3_handle *handle);
bool (*query_fd_qb_state)(struct hnae3_handle *handle);
bool (*set_tx_hwts_info)(struct hnae3_handle *handle,
struct sk_buff *skb);
void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
@ -777,6 +807,8 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
int (*priv_ops)(struct hnae3_handle *handle, int opcode,
void *data, size_t length);
};
struct hnae3_dcb_ops {
@ -814,6 +846,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
bool mqprio_destroy;
bool dcb_ets_active;
};
@ -874,6 +907,7 @@ struct hnae3_roce_private_info {
enum hnae3_pflag {
HNAE3_PFLAG_LIMIT_PROMISC,
HNAE3_PFLAG_FD_QB_ENABLE,
HNAE3_PFLAG_MAX
};

View File

@ -0,0 +1,121 @@
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2023 Hisilicon Limited.
#ifndef __HNAE3_EXT_H
#define __HNAE3_EXT_H
/* Reset/fault event types exchanged with external users of the hns3
 * extension interface (see nic_chip_recover_handler() and the
 * HNAE3_EXT_OPC_EVENT_CALLBACK opcode).
 */
enum hnae3_event_type_custom {
	HNAE3_VF_RESET_CUSTOM,
	HNAE3_VF_FUNC_RESET_CUSTOM,
	HNAE3_VF_PF_FUNC_RESET_CUSTOM,
	HNAE3_VF_FULL_RESET_CUSTOM,
	HNAE3_FLR_RESET_CUSTOM,
	HNAE3_FUNC_RESET_CUSTOM,
	HNAE3_GLOBAL_RESET_CUSTOM,
	HNAE3_IMP_RESET_CUSTOM,
	HNAE3_UNKNOWN_RESET_CUSTOM,
	HNAE3_NONE_RESET_CUSTOM,
	HNAE3_PORT_FAULT,
	HNAE3_RESET_DONE_CUSTOM,
	HNAE3_FUNC_RESET_FAIL_CUSTOM,
	HNAE3_GLOBAL_RESET_FAIL_CUSTOM,
	HNAE3_IMP_RESET_FAIL_CUSTOM,
	HNAE3_PPU_POISON_CUSTOM,
	HNAE3_IMP_RD_POISON_CUSTOM,
	HNAE3_ROCEE_AXI_RESP_CUSTOM,
	HNAE3_INVALID_EVENT_CUSTOM,
};
/* Opcodes for the extended private-ops channel (hnae3_ae_ops->priv_ops).
 * Each opcode carries an opcode-specific payload; see the nic_* wrappers
 * in hns3_ext.c for the expected data/length of each operation.
 */
enum hnae3_ext_opcode {
	HNAE3_EXT_OPC_RESET,
	HNAE3_EXT_OPC_EVENT_CALLBACK,
	HNAE3_EXT_OPC_GET_PFC_STORM_PARA,
	HNAE3_EXT_OPC_SET_PFC_STORM_PARA,
	HNAE3_EXT_OPC_SET_NOTIFY_PARAM,
	HNAE3_EXT_OPC_SET_NOTIFY_START,
	HNAE3_EXT_OPC_SET_TORUS_PARAM,
	HNAE3_EXT_OPC_GET_TORUS_PARAM,
	HNAE3_EXT_OPC_CLEAN_STATS64,
	HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO,
	HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO,
	HNAE3_EXT_OPC_GET_PORT_NUM,
	HNAE3_EXT_OPC_GET_PRESENT,
	HNAE3_EXT_OPC_SET_SFP_STATE,
	HNAE3_EXT_OPC_DISABLE_LANE,
	HNAE3_EXT_OPC_GET_LANE_STATUS,
	HNAE3_EXT_OPC_DISABLE_CLOCK,
	HNAE3_EXT_OPC_SET_PFC_TIME,
	HNAE3_EXT_OPC_GET_HILINK_REF_LOS,
	HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS,
	HNAE3_EXT_OPC_GET_PORT_TYPE,
	HNAE3_EXT_OPC_SET_MAC_STATE,
	HNAE3_EXT_OPC_SET_LED,
	HNAE3_EXT_OPC_GET_LED_SIGNAL,
	HNAE3_EXT_OPC_GET_PHY_REG,
	HNAE3_EXT_OPC_SET_PHY_REG,
};
/* Payload for HNAE3_EXT_OPC_SET_LED.
 * NOTE(review): exact type/status encodings are firmware-defined and not
 * visible here - confirm against the AE-layer implementation.
 */
struct hnae3_led_state_para {
	u32 type;
	u32 status;
};
/* Payload for HNAE3_EXT_OPC_GET_PHY_REG / HNAE3_EXT_OPC_SET_PHY_REG:
 * addressing (page-select register, page, register) plus the 16-bit data
 * read or written.
 */
struct hnae3_phy_para {
	u32 page_select_addr;
	u32 reg_addr;
	u16 page;
	u16 data;
};
/* Payload for HNAE3_EXT_OPC_GET_LED_SIGNAL: per-LED signal states. */
struct hnae3_lamp_signal {
	u8 error;
	u8 locate;
	u8 activity;
};
/* PFC storm detection parameters, used with
 * HNAE3_EXT_OPC_SET_PFC_STORM_PARA / HNAE3_EXT_OPC_GET_PFC_STORM_PARA.
 */
struct hnae3_pfc_storm_para {
	u32 dir;		/* HNS3_PFC_STORM_PARA_DIR_RX or _TX */
	u32 enable;		/* HNS3_PFC_STORM_PARA_DISABLE or _ENABLE */
	u32 period_ms;		/* detection period, in milliseconds */
	u32 times;		/* must be non-zero; see nic_check_pfc_storm_para() */
	u32 recovery_period_ms;	/* recovery period, in milliseconds */
};
/* Fault categories reported via HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS. */
enum hnae3_port_fault_type {
	HNAE3_FAULT_TYPE_CDR_FLASH,
	HNAE3_FAULT_TYPE_9545_ERR,
	HNAE3_FAULT_TYPE_CDR_CORE,
	HNAE3_FAULT_TYPE_HILINK_REF_LOS,
	HNAE3_FAULT_TYPE_INVALID
};
/* Payload for HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS: the queried fault
 * type (enum hnae3_port_fault_type) and its reported status.
 */
struct hnae3_port_fault {
	u32 fault_type;
	u32 fault_status;
};
/* Notify-packet configuration for HNAE3_EXT_OPC_SET_NOTIFY_PARAM. */
struct hnae3_notify_pkt_param {
	u32 ipg; /* inter-packet gap of sending, the unit is one cycle of clock */
	u16 num; /* packet number of sending */
	u8 enable; /* send enable, 0=Disable, 1=Enable */
	u8 init; /* initialization flag, product does not need to set value */
	u8 data[64]; /* note packet data */
};
/* 1D-torus configuration, used with HNAE3_EXT_OPC_SET_TORUS_PARAM and
 * HNAE3_EXT_OPC_GET_TORUS_PARAM.
 */
struct hnae3_torus_param {
	u32 enable; /* 1d torus mode enable */
	u32 mac_id; /* export mac id of port */
	u8 is_node0; /* if current node is node0 */
};
/* Port identity info for HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO.
 * NOTE(review): "hane3" (not "hnae3") is a typo, but it is the exported
 * interface name - renaming would break external callers.
 */
struct hane3_port_ext_id_info {
	u32 chip_id;
	u32 mac_id;
	u32 io_die_id;
};
/* Chip/die counts for HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO.
 * NOTE(review): "hane3" spelling kept - established interface name.
 */
struct hane3_port_ext_num_info {
	u32 chip_num;
	u32 io_die_num;
};
#endif

View File

@ -157,6 +157,9 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
{HCLGE_COMM_CAP_NOTIFY_PKT_B, HNAE3_DEV_SUPPORT_NOTIFY_PKT_B},
{HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@ -350,7 +353,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
};
u32 i;
@ -469,10 +472,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num)
{
bool is_special = hclge_comm_is_special_opcode(desc->opcode);
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret;
int ntc;
if (hw->cmq.ops.trace_cmd_send)
hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);
spin_lock_bh(&hw->cmq.csq.lock);
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
@ -506,6 +513,9 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
spin_unlock_bh(&hw->cmq.csq.lock);
if (hw->cmq.ops.trace_cmd_get)
hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);
return ret;
}
@ -583,6 +593,17 @@ err_csq:
return ret;
}
void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
const struct hclge_comm_cmq_ops *ops)
{
struct hclge_comm_cmq *cmdq = &hw->cmq;
if (ops) {
cmdq->ops.trace_cmd_send = ops->trace_cmd_send;
cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
}
}
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending)

View File

@ -55,7 +55,7 @@
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT 1000000
enum hclge_opcode_type {
/* Generic commands */
@ -91,8 +91,10 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_DFX_GEN_REG = 0x7038,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
@ -245,6 +247,9 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
/* SCC commands */
HCLGE_OPC_QUERY_SCC_VER = 0x1A84,
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
@ -309,6 +314,23 @@ enum hclge_opcode_type {
/* Query link diagnosis info command */
HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
/* EXT command */
HCLGE_OPC_CONFIG_NIC_CLOCK = 0x0060,
HCLGE_OPC_CONFIG_SWITCH_PARAM = 0x1033,
HCLGE_OPC_CONFIG_VLAN_FILTER = 0x1100,
HCLGE_OPC_SET_NOTIFY_PKT = 0x180A,
HCLGE_OPC_CONFIG_1D_TORUS = 0x2300,
HCLGE_OPC_CHIP_ID_GET = 0x7003,
HCLGE_OPC_GET_CHIP_NUM = 0x7005,
HCLGE_OPC_GET_PORT_NUM = 0x7006,
HCLGE_OPC_SET_LED = 0x7007,
HCLGE_OPC_DISABLE_NET_LANE = 0x7008,
HCLGE_OPC_CFG_PAUSE_STORM_PARA = 0x7019,
HCLGE_OPC_CFG_GET_HILINK_REF_LOS = 0x701B,
HCLGE_OPC_GET_PORT_FAULT_STATUS = 0x7023,
HCLGE_OPC_SFP_GET_PRESENT = 0x7101,
HCLGE_OPC_SFP_SET_STATUS = 0x7102,
};
enum hclge_comm_cmd_return_status {
@ -348,9 +370,12 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_GRO_B = 20,
HCLGE_COMM_CAP_FD_B = 21,
HCLGE_COMM_CAP_FEC_STATS_B = 25,
HCLGE_COMM_CAP_VF_FAULT_B = 26,
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_NOTIFY_PKT_B = 29,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32,
};
enum HCLGE_COMM_API_CAP_BITS {
@ -390,6 +415,11 @@ struct hclge_comm_query_version_cmd {
__le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
};
struct hclge_comm_query_scc_cmd {
__le32 scc_version;
u8 rsv[20];
};
#define HCLGE_DESC_DATA_LEN 6
struct hclge_desc {
__le16 opcode;
@ -421,11 +451,22 @@ enum hclge_comm_cmd_status {
HCLGE_COMM_ERR_CSQ_ERROR = -3,
};
struct hclge_comm_hw;
struct hclge_comm_cmq_ops {
void (*trace_cmd_send)(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int num, bool is_special);
void (*trace_cmd_get)(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int num, bool is_special);
};
struct hclge_comm_cmq {
struct hclge_comm_cmq_ring csq;
struct hclge_comm_cmq_ring crq;
u16 tx_timeout;
enum hclge_comm_cmd_status last_status;
struct hclge_comm_cmq_ops ops;
};
struct hclge_comm_hw {
@ -472,5 +513,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending);
void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
const struct hclge_comm_cmq_ops *ops);
#endif

View File

@ -414,6 +414,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support tm flush",
.cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
}, {
.name = "support vf fault detect",
.cap_bit = HNAE3_DEV_SUPPORT_VF_FAULT_B,
}
};
@ -959,7 +962,7 @@ static const struct hns3_dbg_item tx_bd_info_items[] = {
{ "OT_VLAN_TAG", 3 },
{ "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "PAYLEN_FDOP_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
{ "MSS_HW_CSUM", 0 },
};
@ -978,7 +981,7 @@ static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_fdop_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
}
@ -1094,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
*pos += scnprintf(buf + *pos, len - *pos,
"TX timeout threshold: %d seconds\n",
dev->watchdog_timeo / HZ);
*pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
dev_specs->hilink_version);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)

View File

@ -26,6 +26,7 @@
#include <net/geneve.h>
#include "hnae3.h"
#include "hnae3_ext.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
@ -1541,6 +1542,73 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
return 0;
}
/* Report whether queue bonding is currently active on this handle.
 * qb counts as on only when the user has set the qb private flag and
 * the AE layer implements the state-query hook.
 */
static bool hns3_query_fd_qb_state(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (!test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags) ||
	    !ops->query_fd_qb_state)
		return false;

	return ops->query_fd_qb_state(handle);
}
/* fd_op is the field of tx bd indicates hw whether to add or delete
 * a qb rule or do nothing.
 */
static u8 hns3_fd_qb_handle(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	union l4_hdr_info l4;
	union l3_hdr_info l3;
	u8 l4_proto_tmp = 0;
	__be16 frag_off;
	u8 ip_version;
	u8 fd_op = 0;

	/* qb disabled or unsupported: tell hw to do nothing */
	if (!hns3_query_fd_qb_state(handle))
		return 0;

	/* for encapsulated packets, classify on the inner headers */
	if (skb->encapsulation) {
		ip_version = inner_ip_hdr(skb)->version;
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip_version = ip_hdr(skb)->version;
		l3.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
	}

	if (ip_version == IP_VERSION_IPV6) {
		unsigned char *exthdr;

		/* skip IPv6 extension headers to find the real L4 proto */
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (ip_version == IP_VERSION_IPV4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	/* only TCP flows take part in queue bonding */
	if (l4_proto_tmp != IPPROTO_TCP)
		return 0;

	ring->fd_qb_tx_sample++;
	if (l4.tcp->fin || l4.tcp->rst) {
		/* connection is closing: ask hw to delete the qb rule */
		hnae3_set_bit(fd_op, HNS3_TXD_FD_DEL_B, 1);
		ring->fd_qb_tx_sample = 0;
	} else if (l4.tcp->syn ||
		   ring->fd_qb_tx_sample >= HNS3_FD_QB_FORCE_CNT_MAX) {
		/* new connection, or periodic forced refresh: (re)add rule */
		hnae3_set_bit(fd_op, HNS3_TXD_FD_ADD_B, 1);
		ring->fd_qb_tx_sample = 0;
	}

	return fd_op;
}
/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
@ -1558,7 +1626,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
}
struct hns3_desc_param {
u32 paylen_ol4cs;
u32 paylen_fdop_ol4cs;
u32 ol_type_vlan_len_msec;
u32 type_cs_vlan_tso;
u16 mss_hw_csum;
@ -1568,7 +1636,7 @@ struct hns3_desc_param {
static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
{
pa->paylen_ol4cs = skb->len;
pa->paylen_fdop_ol4cs = skb->len;
pa->ol_type_vlan_len_msec = 0;
pa->type_cs_vlan_tso = 0;
pa->mss_hw_csum = 0;
@ -1636,7 +1704,7 @@ static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
return ret;
}
ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
ret = hns3_set_tso(skb, &param->paylen_fdop_ol4cs, &param->mss_hw_csum,
&param->type_cs_vlan_tso, &desc_cb->send_bytes);
if (unlikely(ret < 0)) {
hns3_ring_stats_update(ring, tx_tso_err);
@ -1650,6 +1718,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc_param param;
u8 fd_op;
int ret;
hns3_init_desc_data(skb, &param);
@ -1665,11 +1734,15 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
return ret;
}
fd_op = hns3_fd_qb_handle(ring, skb);
hnae3_set_field(param.paylen_fdop_ol4cs, HNS3_TXD_FD_OP_M,
HNS3_TXD_FD_OP_S, fd_op);
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(param.ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
desc->tx.paylen_fdop_ol4cs = cpu_to_le32(param.paylen_fdop_ol4cs);
desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
@ -4960,6 +5033,11 @@ static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
}
}
/* Expose the file-scope page_pool_enabled knob to other hns3 source
 * files (e.g. the ethtool page-pool statistics code).
 */
bool hns3_is_page_pool_enabled(void)
{
	return page_pool_enabled;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@ -5390,6 +5468,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_state_init(handle);
if (test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps))
set_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->supported_pflags);
ret = register_netdev(netdev);
if (ret) {
dev_err(priv->dev, "probe register netdev fail!\n");
@ -5943,12 +6024,16 @@ static void hns3_process_hw_error(struct hnae3_handle *handle,
if (hns3_hw_err[i].type == type) {
dev_err(&handle->pdev->dev, "Detected %s!\n",
hns3_hw_err[i].msg);
if (handle->ae_algo->ops->priv_ops)
handle->ae_algo->ops->priv_ops(handle,
HNAE3_EXT_OPC_EVENT_CALLBACK, &type,
sizeof(type));
break;
}
}
}
static const struct hnae3_client_ops client_ops = {
const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,

View File

@ -179,6 +179,11 @@ enum hns3_nic_state {
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
#define HNS3_TXD_FD_ADD_B 1
#define HNS3_TXD_FD_DEL_B 0
#define HNS3_TXD_FD_OP_M GENMASK(21, 20)
#define HNS3_TXD_FD_OP_S 20
#define HNS3_TXD_OL4CS_B 22
#define HNS3_TXD_MSS_S 0
@ -214,6 +219,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
#define HNS3_FD_QB_FORCE_CNT_MAX 20
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
@ -285,7 +292,7 @@ struct __packed hns3_desc {
};
};
__le32 paylen_ol4cs;
__le32 paylen_fdop_ol4cs;
__le16 bdtp_fe_sc_vld_ra_ri;
__le16 mss_hw_csum;
} tx;
@ -398,6 +405,9 @@ enum hns3_pkt_ol4type {
HNS3_OL4_TYPE_UNKNOWN
};
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
struct hns3_rx_ptype {
u32 ptype : 8;
u32 csum_level : 2;
@ -754,4 +764,5 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
bool hns3_is_page_pool_enabled(void);
#endif

View File

@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/phy.h>
#include <linux/sfp.h>
#include <net/page_pool/helpers.h>
#include "hns3_enet.h"
#include "hns3_ethtool.h"
@ -481,22 +482,38 @@ static void hns3_update_limit_promisc_mode(struct net_device *netdev,
hns3_request_update_promisc_mode(handle);
}
/* Private-flag callback for "qb_enable": ask the AE layer to flush the
 * queue-bonding configuration to hardware. @enable is unused here;
 * presumably the AE layer re-reads the flag itself - confirm in the
 * request_flush_qb_config implementation.
 */
static void hns3_update_fd_qb_state(struct net_device *netdev, bool enable)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	if (ops->request_flush_qb_config)
		ops->request_flush_qb_config(h);
}
static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
{ "limit_promisc", hns3_update_limit_promisc_mode }
{ "limit_promisc", hns3_update_limit_promisc_mode },
{ "qb_enable", hns3_update_fd_qb_state },
};
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
int pp_stats_count = 0;
if (!ops->get_sset_count)
return -EOPNOTSUPP;
switch (stringset) {
case ETH_SS_STATS:
#ifdef CONFIG_PAGE_POOL_STATS
if (hns3_is_page_pool_enabled())
pp_stats_count = page_pool_ethtool_stats_get_count();
#endif
return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
ops->get_sset_count(h, stringset));
ops->get_sset_count(h, stringset) + pp_stats_count);
case ETH_SS_TEST:
return ops->get_sset_count(h, stringset);
@ -564,6 +581,10 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
#ifdef CONFIG_PAGE_POOL_STATS
if (hns3_is_page_pool_enabled())
buff = page_pool_ethtool_stats_get_strings(buff);
#endif
buff = hns3_get_strings_tqps(h, buff);
ops->get_strings(h, stringset, (u8 *)buff);
break;
@ -611,6 +632,28 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
return data;
}
#ifdef CONFIG_PAGE_POOL_STATS
static u64 *hns3_ethtool_pp_stats(struct hnae3_handle *handle, u64 *data)
{
struct hns3_nic_priv *priv = handle->priv;
int ring_num = handle->kinfo.num_tqps;
struct page_pool_stats stats = {0};
struct page_pool *page_pool;
int i;
if (!hns3_is_page_pool_enabled())
return data;
for (i = 0; i < ring_num; i++) {
page_pool = priv->ring[i + ring_num].page_pool;
if (page_pool)
page_pool_get_stats(page_pool, &stats);
}
return page_pool_ethtool_stats_get(data, &stats);
}
#endif
/* hns3_get_stats - get detail statistics.
* @netdev: net device
* @stats: statistics info.
@ -632,6 +675,10 @@ static void hns3_get_stats(struct net_device *netdev,
return;
}
#ifdef CONFIG_PAGE_POOL_STATS
p = hns3_ethtool_pp_stats(h, p);
#endif
h->ae_algo->ops->update_stats(h);
/* get per-queue stats */
@ -1798,6 +1845,14 @@ static int hns3_get_module_info(struct net_device *netdev,
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
case SFF8024_ID_QSFP_DD:
case SFF8024_ID_QSFP_PLUS_CMIS:
modinfo->type = ETH_MODULE_SFF_8636;
if (sfp_type.flat_mem & HNS3_CMIS_FLAT_MEMORY)
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
else
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
netdev_err(netdev, "Optical module unknown: %#x\n",
sfp_type.type);

View File

@ -12,9 +12,11 @@ struct hns3_stats {
int stats_offset;
};
#define HNS3_CMIS_FLAT_MEMORY BIT(7)
struct hns3_sfp_type {
u8 type;
u8 ext_type;
u8 flat_mem;
};
struct hns3_pflag_desc {

View File

@ -0,0 +1,543 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2023 Hisilicon Limited.
#include "hns3_ext.h"
/* Check that @ndev belongs to the hns3 PF driver.
 * Returns 0 on a match, -EINVAL for any other device (including VFs).
 */
int nic_netdev_match_check(struct net_device *ndev)
{
#define HNS3_DRIVER_NAME_LEN 5
	struct ethtool_drvinfo drv_info;
	struct hnae3_handle *h;

	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		return -EINVAL;

	ndev->ethtool_ops->get_drvinfo(ndev, &drv_info);
	/* compare 5 bytes so that only the exact name "hns3" (with its
	 * terminator) matches, ruling out e.g. "hns3vf"
	 */
	if (strncmp(drv_info.driver, "hns3", HNS3_DRIVER_NAME_LEN) != 0)
		return -EINVAL;

	h = hns3_get_handle(ndev);
	if (h->flags & HNAE3_SUPPORT_VF)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(nic_netdev_match_check);
/* Forward an extended private opcode to the AE layer after validating
 * the device and the payload. @data and @length must be both set or
 * both zero.
 */
static int nic_invoke_pri_ops(struct net_device *ndev, int opcode,
			      void *data, size_t length)
{
	struct hnae3_handle *h;
	int ret;

	if (nic_netdev_match_check(ndev))
		return -ENODEV;

	/* exactly one of data/length set is invalid */
	if (!data != !length) {
		netdev_err(ndev, "failed to check data and length");
		return -EINVAL;
	}

	h = hns3_get_handle(ndev);
	if (!h->ae_algo->ops->priv_ops)
		return -EOPNOTSUPP;

	ret = h->ae_algo->ops->priv_ops(h, opcode, data, length);
	if (ret)
		netdev_err(ndev,
			   "failed to invoke pri ops, opcode = %#x, ret = %d\n",
			   opcode, ret);

	return ret;
}
/* Entry point for external chip-recovery notifications: translate the
 * custom event into a reset request for the AE layer. PPU poison is
 * downgraded to a function reset; any type other than func/global/imp
 * reset is rejected.
 */
void nic_chip_recover_handler(struct net_device *ndev,
			      enum hnae3_event_type_custom event_t)
{
	if (nic_netdev_match_check(ndev))
		return;

	dev_info(&ndev->dev, "reset type is %d!!\n", event_t);

	if (event_t == HNAE3_PPU_POISON_CUSTOM)
		event_t = HNAE3_FUNC_RESET_CUSTOM;

	switch (event_t) {
	case HNAE3_FUNC_RESET_CUSTOM:
	case HNAE3_GLOBAL_RESET_CUSTOM:
	case HNAE3_IMP_RESET_CUSTOM:
		nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_RESET, &event_t,
				   sizeof(event_t));
		break;
	default:
		dev_err(&ndev->dev, "reset type err!!\n");
		break;
	}
}
EXPORT_SYMBOL(nic_chip_recover_handler);
/* Validate PFC storm detection parameters.
 * Returns 0 when all values are in range, -EINVAL otherwise.
 */
static int nic_check_pfc_storm_para(u32 dir, u32 enable, u32 period_ms,
				    u32 times, u32 recovery_period_ms)
{
	if ((dir != HNS3_PFC_STORM_PARA_DIR_RX &&
	     dir != HNS3_PFC_STORM_PARA_DIR_TX) ||
	    (enable != HNS3_PFC_STORM_PARA_DISABLE &&
	     enable != HNS3_PFC_STORM_PARA_ENABLE))
		return -EINVAL;

	/* "times == 0" instead of the original "times <= 0": times is
	 * unsigned, so "<= 0" was a tautological comparison (same result,
	 * but flagged by -Wtype-limits)
	 */
	if (period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN ||
	    period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX ||
	    recovery_period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN ||
	    recovery_period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX ||
	    times == 0)
		return -EINVAL;

	return 0;
}
/* Configure PFC storm detection on @ndev. All parameters are validated
 * by nic_check_pfc_storm_para() before being handed to the AE layer.
 * Returns 0 on success or a negative errno.
 */
int nic_set_pfc_storm_para(struct net_device *ndev, u32 dir, u32 enable,
			   u32 period_ms, u32 times, u32 recovery_period_ms)
{
	struct hnae3_pfc_storm_para para = {
		.dir = dir,
		.enable = enable,
		.period_ms = period_ms,
		.times = times,
		.recovery_period_ms = recovery_period_ms,
	};

	if (nic_check_pfc_storm_para(dir, enable, period_ms, times,
				     recovery_period_ms)) {
		pr_err("set pfc storm para failed because invalid input param.\n");
		return -EINVAL;
	}

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PFC_STORM_PARA,
				  &para, sizeof(para));
}
EXPORT_SYMBOL(nic_set_pfc_storm_para);
/* Read back the PFC storm detection settings for direction @dir.
 * All output pointers must be non-NULL. Returns 0 on success or a
 * negative errno.
 */
int nic_get_pfc_storm_para(struct net_device *ndev, u32 dir, u32 *enable,
			   u32 *period_ms, u32 *times, u32 *recovery_period_ms)
{
	struct hnae3_pfc_storm_para para = { .dir = dir };
	int ret;

	if (!enable || !period_ms || !times || !recovery_period_ms ||
	    (dir != HNS3_PFC_STORM_PARA_DIR_RX &&
	     dir != HNS3_PFC_STORM_PARA_DIR_TX)) {
		pr_err("get pfc storm para failed because invalid input param.\n");
		return -EINVAL;
	}

	ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PFC_STORM_PARA,
				 &para, sizeof(para));
	if (ret)
		return ret;

	*enable = para.enable;
	*period_ms = para.period_ms;
	*times = para.times;
	*recovery_period_ms = para.recovery_period_ms;

	return 0;
}
EXPORT_SYMBOL(nic_get_pfc_storm_para);
/* Push a notify-packet configuration to the AE layer.
 * Returns 0 on success or a negative errno.
 */
int nic_set_notify_pkt_param(struct net_device *ndev,
			     struct hnae3_notify_pkt_param *param)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_PARAM,
				  param, sizeof(*param));
}
EXPORT_SYMBOL(nic_set_notify_pkt_param);
/* Trigger sending of the previously configured notify packet.
 * Returns 0 on success or a negative errno.
 */
int nic_set_notify_pkt_start(struct net_device *ndev)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_START, NULL, 0);
}
EXPORT_SYMBOL(nic_set_notify_pkt_start);
/* Apply a 1D-torus configuration. @param->enable must be 0 or 1.
 * Returns 0 on success or a negative errno.
 */
int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param)
{
	/* enable is a u32, so "> 1" rejects everything except 0 and 1 */
	if (!param || param->enable > 1)
		return -EINVAL;

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_TORUS_PARAM,
				  param, sizeof(*param));
}
EXPORT_SYMBOL(nic_set_torus_param);
/* Read back the current 1D-torus configuration into @param.
 * Returns 0 on success or a negative errno.
 */
int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_TORUS_PARAM,
				  param, sizeof(*param));
}
EXPORT_SYMBOL(nic_get_torus_param);
/* Zero both the hardware counters (via the AE layer) and the software
 * ring/netdev statistics of @ndev. Fails with -EBUSY while the nic is
 * uninitialized or resetting.
 * NOTE(review): @stats is currently unused - presumably kept for
 * interface compatibility; confirm with external callers.
 */
int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct hnae3_knic_private_info *kinfo;
	struct hns3_enet_ring *ring;
	struct hns3_nic_priv *priv;
	struct hnae3_handle *h;
	int i, ret;

	if (nic_netdev_match_check(ndev))
		return -ENODEV;

	priv = netdev_priv(ndev);
	h = hns3_get_handle(ndev);
	kinfo = &h->kinfo;

	/* hold rtnl so state bits and rings cannot change under us */
	rtnl_lock();

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
		ret = -EBUSY;
		goto end_unlock;
	}

	/* clear the hardware counters first */
	ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_CLEAN_STATS64,
				 NULL, 0);
	if (ret)
		goto end_unlock;

	/* clear software stats of tx ring i and rx ring i + num_tqps */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = &priv->ring[i];
		memset(&ring->stats, 0, sizeof(struct ring_stats));
		ring = &priv->ring[i + kinfo->num_tqps];
		memset(&ring->stats, 0, sizeof(struct ring_stats));
	}

	memset(&ndev->stats, 0, sizeof(struct net_device_stats));
	netdev_info(ndev, "clean stats succ\n");

end_unlock:
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL(nic_clean_stats64);
/* Pin the interrupts of all initialized TQP vectors of @ndev to
 * @affinity_mask. Fails with -EBUSY while uninitialized/resetting and
 * with -ENETDOWN while the interface is down.
 */
int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask)
{
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hns3_nic_priv *priv;
	int ret = 0;
	u16 i;

	if (nic_netdev_match_check(ndev))
		return -ENODEV;

	if (!affinity_mask) {
		netdev_err(ndev,
			   "Invalid input param when set ethernet cpu affinity\n");
		return -EINVAL;
	}

	priv = netdev_priv(ndev);

	/* hold rtnl so state bits and vectors cannot change under us */
	rtnl_lock();

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
		ret = -EBUSY;
		goto err_unlock;
	}

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netdev_err(ndev,
			   "ethernet is down, not support cpu affinity set\n");
		ret = -ENETDOWN;
		goto err_unlock;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		/* skip vectors whose irq was never requested */
		if (tqp_vector->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		tqp_vector->affinity_mask = *affinity_mask;

		/* clear the old hint before installing the new one */
		ret = irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
		if (ret) {
			netdev_err(ndev,
				   "failed to reset affinity hint, ret = %d\n", ret);
			goto err_unlock;
		}

		ret = irq_set_affinity_hint(tqp_vector->vector_irq,
					    &tqp_vector->affinity_mask);
		if (ret) {
			netdev_err(ndev,
				   "failed to set affinity hint, ret = %d\n", ret);
			goto err_unlock;
		}
	}

	netdev_info(ndev, "set nic cpu affinity %*pb succeed\n",
		    cpumask_pr_args(affinity_mask));

err_unlock:
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL(nic_set_cpu_affinity);
/* Fetch the port identity block (chip/mac/io-die ids) from the AE layer.
 * Shared helper for nic_get_chipid()/nic_get_mac_id()/nic_get_io_die_id().
 */
static int nic_get_ext_id_info(struct net_device *ndev,
			       struct hane3_port_ext_id_info *id_info)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO,
				  id_info, sizeof(*id_info));
}
/* Query the chip id of the port behind @ndev.
 * Returns 0 on success or a negative errno; @chip_id must be non-NULL.
 */
int nic_get_chipid(struct net_device *ndev, u32 *chip_id)
{
	struct hane3_port_ext_id_info info;
	int ret;

	if (!chip_id)
		return -EINVAL;

	ret = nic_get_ext_id_info(ndev, &info);
	if (!ret)
		*chip_id = info.chip_id;

	return ret;
}
EXPORT_SYMBOL(nic_get_chipid);
/* Query the mac id of the port behind @ndev.
 * Returns 0 on success or a negative errno; @mac_id must be non-NULL.
 */
int nic_get_mac_id(struct net_device *ndev, u32 *mac_id)
{
	struct hane3_port_ext_id_info info;
	int ret;

	if (!mac_id)
		return -EINVAL;

	ret = nic_get_ext_id_info(ndev, &info);
	if (!ret)
		*mac_id = info.mac_id;

	return ret;
}
EXPORT_SYMBOL(nic_get_mac_id);
/* Query the io-die id of the port behind @ndev.
 * Returns 0 on success or a negative errno; @io_die_id must be non-NULL.
 */
int nic_get_io_die_id(struct net_device *ndev, u32 *io_die_id)
{
	struct hane3_port_ext_id_info info;
	int ret;

	if (!io_die_id)
		return -EINVAL;

	ret = nic_get_ext_id_info(ndev, &info);
	if (!ret)
		*io_die_id = info.io_die_id;

	return ret;
}
EXPORT_SYMBOL(nic_get_io_die_id);
static int nic_get_ext_num_info(struct net_device *ndev,
struct hane3_port_ext_num_info *num_info)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO,
num_info, sizeof(*num_info));
}
/* Report the number of chips in the system hosting @ndev.
 * Returns 0 on success, -EINVAL on a NULL output pointer, or the
 * error from the underlying query.
 */
int nic_get_chip_num(struct net_device *ndev, u32 *chip_num)
{
	struct hane3_port_ext_num_info info;
	int ret;

	if (!chip_num)
		return -EINVAL;

	ret = nic_get_ext_num_info(ndev, &info);
	if (!ret)
		*chip_num = info.chip_num;

	return ret;
}
EXPORT_SYMBOL(nic_get_chip_num);
/* Report the number of IO dies in the system hosting @ndev.
 * Returns 0 on success, -EINVAL on a NULL output pointer, or the
 * error from the underlying query.
 */
int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num)
{
	struct hane3_port_ext_num_info info;
	int ret;

	if (!io_die_num)
		return -EINVAL;

	ret = nic_get_ext_num_info(ndev, &info);
	if (!ret)
		*io_die_num = info.io_die_num;

	return ret;
}
EXPORT_SYMBOL(nic_get_io_die_num);
/* Query how many ports the local die exposes. */
int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num)
{
	size_t len = sizeof(*port_num);

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_NUM, port_num, len);
}
EXPORT_SYMBOL(nic_get_port_num_of_die);
/* Per-chip port count is served by the same firmware query as the
 * per-die count — presumably one die per chip here; confirm against
 * the hardware spec if the topology changes.
 */
int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num)
{
	return nic_get_port_num_of_die(ndev, port_num);
}
EXPORT_SYMBOL(nic_get_port_num_per_chip);
/* Set the netdev watchdog TX timeout, in seconds.
 * Valid range is (0, HNS3_MAX_TX_TIMEOUT]; -EINVAL otherwise,
 * -ENODEV if @ndev is not an hns3 device.
 */
int nic_set_tx_timeout(struct net_device *ndev, int tx_timeout)
{
	if (nic_netdev_match_check(ndev))
		return -ENODEV;

	if (tx_timeout > 0 && tx_timeout <= HNS3_MAX_TX_TIMEOUT) {
		ndev->watchdog_timeo = tx_timeout * HZ;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(nic_set_tx_timeout);
int nic_get_sfp_present(struct net_device *ndev, int *present)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PRESENT,
present, sizeof(*present));
}
EXPORT_SYMBOL(nic_get_sfp_present);
/* Turn the SFP module's TX on or off. */
int nic_set_sfp_state(struct net_device *ndev, bool en)
{
	u32 state = en;	/* bool promotes to exactly 0 or 1 */

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_SFP_STATE,
				  &state, sizeof(state));
}
EXPORT_SYMBOL(nic_set_sfp_state);
/* Disable the network serdes lane of this port; no payload needed. */
int nic_disable_net_lane(struct net_device *ndev)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_DISABLE_LANE, NULL, 0);
}
EXPORT_SYMBOL(nic_disable_net_lane);
/* Query the current network lane status into *@status. */
int nic_get_net_lane_status(struct net_device *ndev, u32 *status)
{
	size_t len = sizeof(*status);

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_LANE_STATUS, status, len);
}
EXPORT_SYMBOL(nic_get_net_lane_status);
/* Gate the port clock; no payload needed. */
int nic_disable_clock(struct net_device *ndev)
{
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_DISABLE_CLOCK, NULL, 0);
}
EXPORT_SYMBOL(nic_disable_clock);
/* Configure the PFC pause time for this port. */
int nic_set_pfc_time_cfg(struct net_device *ndev, u16 time)
{
	u16 pause_time = time;

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PFC_TIME,
				  &pause_time, sizeof(pause_time));
}
EXPORT_SYMBOL(nic_set_pfc_time_cfg);
/* Query a port fault status of @fault_type into *@status.
 * HILINK reference-LOS faults use a dedicated opcode; every other
 * fault type goes through the generic fault-status query.
 */
int nic_get_port_fault_status(struct net_device *ndev, u32 fault_type, u32 *status)
{
	struct hnae3_port_fault fault_para;
	int opcode;
	int ret;

	if (!status)
		return -EINVAL;

	opcode = (fault_type == HNAE3_FAULT_TYPE_HILINK_REF_LOS) ?
		 HNAE3_EXT_OPC_GET_HILINK_REF_LOS :
		 HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS;

	fault_para.fault_type = fault_type;
	ret = nic_invoke_pri_ops(ndev, opcode, &fault_para, sizeof(fault_para));
	if (!ret)
		*status = fault_para.fault_status;

	return ret;
}
EXPORT_SYMBOL(nic_get_port_fault_status);
/* Query the physical wire/media type of the port into *@wire_type. */
int nic_get_port_wire_type(struct net_device *ndev, u32 *wire_type)
{
	size_t len = sizeof(*wire_type);

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_TYPE, wire_type, len);
}
EXPORT_SYMBOL(nic_get_port_wire_type);
/* Enable or disable the port MAC. */
int nic_set_mac_state(struct net_device *ndev, int enable)
{
	int state = enable;

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_MAC_STATE,
				  &state, sizeof(state));
}
EXPORT_SYMBOL(nic_set_mac_state);
/* Drive a port LED: set LED @type to @status.
 *
 * Fix: the parameter struct was only partially assigned before being
 * handed to the lower-level ops, so any members beyond .type/.status
 * (and padding) carried uninitialized stack bytes. Use a designated
 * initializer so all remaining members are zeroed.
 */
int nic_set_led(struct net_device *ndev, u32 type, u32 status)
{
	struct hnae3_led_state_para para = {
		.type = type,
		.status = status,
	};

	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_LED,
				  &para, sizeof(para));
}
EXPORT_SYMBOL(nic_set_led);
int nic_get_led_signal(struct net_device *ndev, struct hnae3_lamp_signal *signal)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_LED_SIGNAL,
signal, sizeof(*signal));
}
EXPORT_SYMBOL(nic_get_led_signal);
/* Read a (possibly paged) PHY register; the result lands in *@data.
 *
 * Fix: @para was only partially assigned (page_select_addr/page/
 * reg_addr) while the whole struct is passed to the lower layer, so
 * the remaining members (.data and any reserved fields) carried
 * uninitialized stack bytes. Zero-initialize the struct first.
 */
int nic_get_phy_reg(struct net_device *ndev, u32 page_select_addr,
		    u16 page, u32 reg_addr, u16 *data)
{
	struct hnae3_phy_para para = {0};
	int ret;

	if (!data)
		return -EINVAL;

	para.page_select_addr = page_select_addr;
	para.page = page;
	para.reg_addr = reg_addr;
	ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PHY_REG,
				 &para, sizeof(para));
	if (ret)
		return ret;

	*data = para.data;
	return 0;
}
EXPORT_SYMBOL(nic_get_phy_reg);
/* Write @data to a (possibly paged) PHY register.
 *
 * Fix: zero-initialize @para so reserved members and padding do not
 * carry uninitialized stack bytes into the lower layer (consistent
 * with the hardened nic_get_phy_reg()).
 */
int nic_set_phy_reg(struct net_device *ndev, u32 page_select_addr,
		    u16 page, u32 reg_addr, u16 data)
{
	struct hnae3_phy_para para = {0};

	para.page_select_addr = page_select_addr;
	para.page = page;
	para.reg_addr = reg_addr;
	para.data = data;
	return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PHY_REG,
				  &para, sizeof(para));
}
EXPORT_SYMBOL(nic_set_phy_reg);

View File

@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2023 Hisilicon Limited. */
#ifndef __HNS3_EXT_H
#define __HNS3_EXT_H
#include <linux/types.h>
#include "hns3_enet.h"
#include "hnae3_ext.h"

/* PFC storm parameter: direction and enable values */
#define HNS3_PFC_STORM_PARA_DIR_RX 0
#define HNS3_PFC_STORM_PARA_DIR_TX 1
#define HNS3_PFC_STORM_PARA_DISABLE 0
#define HNS3_PFC_STORM_PARA_ENABLE 1
/* PFC storm detect/recovery period bounds, in milliseconds */
#define HNS3_PFC_STORM_PARA_PERIOD_MIN 5
#define HNS3_PFC_STORM_PARA_PERIOD_MAX 2000
/* upper bound accepted by nic_set_tx_timeout(), in seconds */
#define HNS3_MAX_TX_TIMEOUT 600

/* 8211 PHY registers are accessed through the generic helpers directly */
#define nic_set_8211_phy_reg nic_set_phy_reg
#define nic_get_8211_phy_reg nic_get_phy_reg
/* 8521 PHY access uses a zero page-select address */
#define nic_set_8521_phy_reg(ndev, page, reg_addr, data) \
	nic_set_phy_reg(ndev, 0, page, reg_addr, data)
#define nic_get_8521_phy_reg(ndev, page, reg_addr, data) \
	nic_get_phy_reg(ndev, 0, page, reg_addr, data)
/* convenience wrappers over nic_get_port_fault_status() */
#define nic_get_cdr_flash_status(ndev, status) \
	nic_get_port_fault_status(ndev, HNAE3_FAULT_TYPE_CDR_FLASH, status)
#define nic_get_hilink_ref_los(ndev, status) \
	nic_get_port_fault_status(ndev, HNAE3_FAULT_TYPE_HILINK_REF_LOS, status)

/* returns 0 when @netdev is an hns3 device, non-zero otherwise */
int nic_netdev_match_check(struct net_device *netdev);
void nic_chip_recover_handler(struct net_device *ndev,
			      enum hnae3_event_type_custom event_t);
/* PFC storm detection configuration */
int nic_set_pfc_storm_para(struct net_device *ndev, u32 dir, u32 enable,
			   u32 period_ms, u32 times, u32 recovery_period_ms);
int nic_get_pfc_storm_para(struct net_device *ndev, u32 dir, u32 *enable,
			   u32 *period_ms, u32 *times, u32 *recovery_period_ms);
int nic_set_notify_pkt_param(struct net_device *ndev,
			     struct hnae3_notify_pkt_param *param);
int nic_set_notify_pkt_start(struct net_device *ndev);
int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param);
int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param);
int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask);
/* chip/MAC/IO-die topology queries */
int nic_get_chipid(struct net_device *ndev, u32 *chip_id);
int nic_get_mac_id(struct net_device *ndev, u32 *mac_id);
int nic_get_io_die_id(struct net_device *ndev, u32 *io_die_id);
int nic_get_chip_num(struct net_device *ndev, u32 *chip_num);
int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num);
int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num);
int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num);
int nic_set_tx_timeout(struct net_device *ndev, int tx_timeout);
/* SFP / lane / clock management */
int nic_get_sfp_present(struct net_device *ndev, int *present);
int nic_set_sfp_state(struct net_device *ndev, bool en);
int nic_disable_net_lane(struct net_device *ndev);
int nic_get_net_lane_status(struct net_device *ndev, u32 *status);
int nic_disable_clock(struct net_device *ndev);
int nic_set_pfc_time_cfg(struct net_device *ndev, u16 time);
int nic_get_port_fault_status(struct net_device *ndev, u32 fault_type, u32 *status);
int nic_get_port_wire_type(struct net_device *ndev, u32 *wire_type);
int nic_set_mac_state(struct net_device *ndev, int enable);
/* LED and PHY register access */
int nic_set_led(struct net_device *ndev, u32 type, u32 status);
int nic_get_led_signal(struct net_device *ndev, struct hnae3_lamp_signal *signal);
int nic_get_phy_reg(struct net_device *ndev, u32 page_select_addr,
		    u16 page, u32 reg_addr, u16 *data);
int nic_set_phy_reg(struct net_device *ndev, u32 page_select_addr,
		    u16 page, u32 reg_addr, u16 data);
#endif

View File

@ -354,6 +354,12 @@ struct hclge_sfp_info_cmd {
u8 rsv[6];
};
/* Firmware command payload for the port fault query.
 * NOTE(review): fault_status appears to be filled by firmware and
 * port_type to select the fault source — confirm against the
 * firmware command spec.
 */
struct hclge_port_fault_cmd {
	__le32 fault_status;
	__le32 port_type;
	u8 rsv[16];	/* pad to the 24-byte descriptor data area */
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
#define HCLGE_MAC_CFG_FEC_MODE_S 1
#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1)
@ -727,11 +733,11 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_DROP_B 0
#define HCLGE_FD_AD_DIRECT_QID_B 1
#define HCLGE_FD_AD_QID_S 2
#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
#define HCLGE_FD_AD_QID_L_S 2
#define HCLGE_FD_AD_QID_L_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
#define HCLGE_FD_AD_COUNTER_NUM_L_S 13
#define HCLGE_FD_AD_COUNTER_NUM_L_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
@ -741,6 +747,8 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_TC_OVRD_B 16
#define HCLGE_FD_AD_TC_SIZE_S 17
#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
#define HCLGE_FD_AD_QID_H_B 21
#define HCLGE_FD_AD_COUNTER_NUM_H_B 26
struct hclge_fd_ad_config_cmd {
u8 stage;
@ -758,6 +766,24 @@ struct hclge_fd_ad_cnt_read_cmd {
u8 rsv2[8];
};
/* QB (queue bonding) enable command: per-VF enable flag. */
struct hclge_fd_qb_cfg_cmd {
	u8 en;
	u8 vf_id;
	u8 rsv[22];	/* pad to the 24-byte descriptor data area */
};
/* validity bits carried in hclge_fd_qb_ad_cmd.ad_sel */
#define HCLGE_FD_QB_AD_RULE_ID_VLD_B 0
#define HCLGE_FD_QB_AD_COUNTER_VLD_B 1

/* QB action-data configuration for one VF: optional hit rule id and
 * counter id, each validated by the ad_sel bits above.
 */
struct hclge_fd_qb_ad_cmd {
	u8 vf_id;
	u8 rsv1;
	u8 ad_sel;
	u8 rsv2;
	__le16 hit_rule_id;
	u8 counter_id;
	u8 rsv3[17];	/* pad to the 24-byte descriptor data area */
};
#define HCLGE_FD_USER_DEF_OFT_S 0
#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
#define HCLGE_FD_USER_DEF_EN_B 15
@ -828,7 +854,8 @@ struct hclge_dev_specs_1_cmd {
__le16 mc_mac_size;
u8 rsv1[6];
u8 tnl_num;
u8 rsv2[5];
u8 hilink_version;
u8 rsv2[4];
};
/* mac speed type defined in firmware command */
@ -867,11 +894,17 @@ struct hclge_phy_link_ksetting_1_cmd {
u8 rsv[22];
};
#define HCLGE_PHY_RW_DIRECTLY 0
#define HCLGE_PHY_RW_WITH_PAGE 1
struct hclge_phy_reg_cmd {
__le16 reg_addr;
u8 rsv0[2];
__le16 reg_val;
u8 rsv1[18];
u8 rsv1[2];
u8 type;
u8 dev_addr;
__le16 page;
u8 rsv2[12];
};
struct hclge_wol_cfg_cmd {

View File

@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return ret;
}
kinfo->tc_info.mqprio_destroy = !tc;
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;

View File

@ -161,10 +161,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
return 0;
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc_src,
int index, int bd_num,
enum hclge_opcode_type cmd)
int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
int index, int bd_num, enum hclge_opcode_type cmd)
{
struct hclge_desc *desc = desc_src;
int ret, i;
@ -1510,8 +1508,7 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_DBG_TCAM_BUF_SIZE 256
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
char *tcam_buf,
struct hclge_dbg_tcam_msg tcam_msg)
char *tcam_buf, u8 stage, u32 loc)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
@ -1531,9 +1528,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
req1->stage = tcam_msg.stage;
req1->stage = stage;
req1->xy_sel = sel_x ? 1 : 0;
req1->index = cpu_to_le32(tcam_msg.loc);
req1->index = cpu_to_le32(loc);
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
@ -1541,7 +1538,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
"read result tcam key %s(%u):\n", sel_x ? "x" : "y",
tcam_msg.loc);
loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
@ -1586,7 +1583,6 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
{
u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
char *tcam_buf;
@ -1621,10 +1617,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
ret = 0;
for (i = 0; i < rule_cnt; i++) {
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, HCLGE_FD_STAGE_1, rule_locs[i]);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
@ -1633,7 +1626,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, HCLGE_FD_STAGE_1, rule_locs[i]);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
@ -1649,6 +1642,86 @@ out:
return ret;
}
/* Read back the FD TCAM entry (X select) at @loc in @stage and report
 * whether it holds a valid rule.
 *
 * Return: positive (entry_vld) if valid, 0 if not, negative errno on
 * command failure.
 */
static int hclge_query_rules_valid(struct hclge_dev *hdev, u8 stage, u32 loc)
{
#define HCLGE_TCAM_SELECTION_X 1
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	/* one TCAM entry spans three descriptors; chain with the NEXT flag */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	/* req2/req3 are mapped for layout completeness; only req1 carries
	 * the request fields and the returned valid flag
	 */
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = HCLGE_TCAM_SELECTION_X;
	req1->index = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to read tcam status, ret = %d\n", ret);
		return ret;
	}

	return req1->entry_vld;
}
/* debugfs dump of the QB-mode FD TCAM: walk every stage-1 rule slot,
 * skip invalid entries, and append each valid entry's X and Y keys to
 * @buf. Returns 0 on success or a negative errno.
 */
static int hclge_dbg_dump_qb_tcam(struct hclge_dev *hdev, char *buf, int len)
{
	char *tcam_buf;
	int pos = 0;
	int ret = 0;
	u32 i;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");
		return -EOPNOTSUPP;
	}

	/* scratch buffer reused for each entry's formatted key */
	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
	if (!tcam_buf)
		return -ENOMEM;

	for (i = 0; i < hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; i++) {
		/* <= 0 covers both "invalid entry" and query errors */
		if (hclge_query_rules_valid(hdev, HCLGE_FD_STAGE_1, i) <= 0)
			continue;

		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf,
					     HCLGE_FD_STAGE_1, i);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get qb tcam key x, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);

		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf,
					     HCLGE_FD_STAGE_1, i);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get qb tcam key y, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
	}
out:
	kfree(tcam_buf);
	return ret;
}
static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
{
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
@ -2400,6 +2473,14 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
/* Dispatch the debugfs TCAM dump: when the hardware QB (queue bonding)
 * state is enabled dump the QB TCAM, otherwise the plain FD TCAM.
 *
 * Fix: dropped the stray ';' after the function's closing brace (an
 * empty file-scope declaration, warned about under -Wpedantic) and the
 * redundant 'else' after 'return'.
 */
static int hclge_dbg_dump_tcam(struct hclge_dev *hdev, char *buf, int len)
{
	if (test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state))
		return hclge_dbg_dump_qb_tcam(hdev, buf, len);

	return hclge_dbg_dump_fd_tcam(hdev, buf, len);
}
static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
hclge_dbg_dump_mac_list(hdev, buf, len, true);
@ -2539,14 +2620,14 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dbg_dump = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dbg_dump = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dbg_dump = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dbg_dump = hclge_dbg_dump_tcam,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dbg_dump = hclge_dbg_dump_serv_info,

View File

@ -69,11 +69,6 @@ struct hclge_dbg_reg_common_msg {
enum hclge_opcode_type cmd;
};
struct hclge_dbg_tcam_msg {
u8 stage;
u32 loc;
};
#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
@ -771,4 +766,7 @@ struct hclge_dbg_vlan_cfg {
u8 pri_only2;
};
int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
int index, int bd_num, enum hclge_opcode_type cmd);
#endif

View File

@ -5,6 +5,34 @@
#include "hclge_devlink.h"
/* Report the firmware SCC version through devlink info as the running
 * "fw.scc" version, formatted as four dot-separated byte fields.
 * Returns 0 on success or a negative errno.
 */
static int hclge_devlink_scc_info_get(struct devlink *devlink,
				      struct devlink_info_req *req)
{
	struct hclge_devlink_priv *priv = devlink_priv(devlink);
	char scc_version[HCLGE_DEVLINK_FW_SCC_LEN];
	struct hclge_dev *hdev = priv->hdev;
	u32 scc_version_tmp;
	int ret;

	ret = hclge_query_scc_version(hdev, &scc_version_tmp);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get scc version, ret = %d\n", ret);
		return ret;
	}

	/* unpack the packed u32 version into "b3.b2.b1.b0" text form */
	snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu",
		 hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	return devlink_info_version_running_put(req, "fw.scc", scc_version);
}
static int hclge_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
int ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink,
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
return devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
version_str);
ret = devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
version_str);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to set running version of fw\n");
return ret;
}
if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2)
ret = hclge_devlink_scc_info_get(devlink, req);
return ret;
}
static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,

View File

@ -6,6 +6,8 @@
#include "hclge_main.h"
#define HCLGE_DEVLINK_FW_SCC_LEN 32
struct hclge_devlink_priv {
struct hclge_dev *hdev;
};

View File

@ -1198,6 +1198,426 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
}
};
static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = {
{
.reg_name = "SSU_BP_STATUS_0~5",
.reg_offset_group = {5, 6, 7, 8, 9, 10},
.group_size = 6
}, {
.reg_name = "LO_PRI_UNICAST_CUR_CNT",
.reg_offset_group = {54},
.group_size = 1
}, {
.reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT",
.reg_offset_group = {55, 56},
.group_size = 2
}, {
.reg_name = "SSU_MB_RD_RLT_DROP_CNT",
.reg_offset_group = {29},
.group_size = 1
}, {
.reg_name = "SSU_PPP_MAC_KEY_NUM",
.reg_offset_group = {31, 30},
.group_size = 2
}, {
.reg_name = "SSU_PPP_HOST_KEY_NUM",
.reg_offset_group = {33, 32},
.group_size = 2
}, {
.reg_name = "PPP_SSU_MAC/HOST_RLT_NUM",
.reg_offset_group = {35, 34, 37, 36},
.group_size = 4
}, {
.reg_name = "FULL/PART_DROP_NUM",
.reg_offset_group = {18, 19},
.group_size = 2
}, {
.reg_name = "PPP_KEY/RLT_DROP_NUM",
.reg_offset_group = {20, 21},
.group_size = 2
}, {
.reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT",
.reg_offset_group = {48, 49},
.group_size = 2
}, {
.reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX",
.reg_offset_group = {50, 51},
.group_size = 2
},
};
static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = {
{
.reg_name = "RX_PACKET_IN/OUT_CNT",
.reg_offset_group = {13, 12, 15, 14},
.group_size = 4
}, {
.reg_name = "TX_PACKET_IN/OUT_CNT",
.reg_offset_group = {17, 16, 19, 18},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC0_IN/OUT_CNT",
.reg_offset_group = {25, 24, 41, 40},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC1_IN/OUT_CNT",
.reg_offset_group = {27, 26, 43, 42},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC2_IN/OUT_CNT",
.reg_offset_group = {29, 28, 45, 44},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC3_IN/OUT_CNT",
.reg_offset_group = {31, 30, 47, 46},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC4_IN/OUT_CNT",
.reg_offset_group = {33, 32, 49, 48},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC5_IN/OUT_CNT",
.reg_offset_group = {35, 34, 51, 50},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC6_IN/OUT_CNT",
.reg_offset_group = {37, 36, 53, 52},
.group_size = 4
}, {
.reg_name = "RX_PACKET_TC7_IN/OUT_CNT",
.reg_offset_group = {39, 38, 55, 54},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC0_IN/OUT_CNT",
.reg_offset_group = {57, 56, 73, 72},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC1_IN/OUT_CNT",
.reg_offset_group = {59, 58, 75, 74},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC2_IN/OUT_CNT",
.reg_offset_group = {61, 60, 77, 76},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC3_IN/OUT_CNT",
.reg_offset_group = {63, 62, 79, 78},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC4_IN/OUT_CNT",
.reg_offset_group = {65, 64, 81, 80},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC5_IN/OUT_CNT",
.reg_offset_group = {67, 66, 83, 82},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC6_IN/OUT_CNT",
.reg_offset_group = {69, 68, 85, 84},
.group_size = 4
}, {
.reg_name = "TX_PACKET_TC7_IN/OUT_CNT",
.reg_offset_group = {71, 70, 87, 86},
.group_size = 4
}, {
.reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT",
.reg_offset_group = {1, 2, 3, 4},
.group_size = 4
}, {
.reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT",
.reg_offset_group = {5, 6, 7, 8},
.group_size = 4
}, {
.reg_name = "ROC_RX_PACKET_IN_CNT",
.reg_offset_group = {21, 20},
.group_size = 2
}, {
.reg_name = "ROC_TX_PACKET_OUT_CNT",
.reg_offset_group = {23, 22},
.group_size = 2
}
};
/* RPU DFX register bank 0 layout. reg_offset_group entries index
 * 32-bit words across the returned descriptor chain (decoded by
 * hclge_print_mod_reg_info()); has_suffix marks per-tunnel registers
 * whose name is suffixed with the tunnel index.
 */
static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = {
	{
		.reg_name = "RPU_FSM_DFX_ST0/ST1_TNL",
		.has_suffix = true,
		.reg_offset_group = {1, 2},
		.group_size = 2
	}, {
		.reg_name = "RPU_RX_PKT_DROP_CNT_TNL",
		.has_suffix = true,
		.reg_offset_group = {3},
		.group_size = 1
	}
};
/* RPU DFX register bank 1 layout; offsets are 32-bit word indices
 * into the returned descriptor chain.
 */
static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = {
	{
		.reg_name = "FIFO_DFX_ST0_1_2_4",
		.reg_offset_group = {1, 2, 3, 5},
		.group_size = 4
	}
};
/* IGU/EGU DFX register layout: ingress/egress packet and error
 * counters; offsets are 32-bit word indices into the returned
 * descriptor chain.
 */
static const struct hclge_mod_reg_info hclge_igu_egu_reg_info[] = {
	{
		.reg_name = "IGU_RX_ERR_PKT",
		.reg_offset_group = {1},
		.group_size = 1
	}, {
		.reg_name = "IGU_RX_OUT_ALL_PKT",
		.reg_offset_group = {29, 28},
		.group_size = 2
	}, {
		.reg_name = "EGU_TX_OUT_ALL_PKT",
		.reg_offset_group = {39, 38},
		.group_size = 2
	}, {
		.reg_name = "EGU_TX_ERR_PKT",
		.reg_offset_group = {5},
		.group_size = 1
	}
};
/* Per-tunnel general DFX registers (names are suffixed with the
 * tunnel index); offsets are 32-bit word indices into the returned
 * descriptor chain.
 */
static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = {
	{
		.reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL",
		.has_suffix = true,
		.reg_offset_group = {1},
		.group_size = 1
	}, {
		.reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL",
		.has_suffix = true,
		.reg_offset_group = {12},
		.group_size = 1
	}
};
static const struct hclge_mod_reg_info hclge_gen_reg_info[] = {
{
.reg_name = "SSU_OVERSIZE_DROP_CNT",
.reg_offset_group = {12},
.group_size = 1
}, {
.reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM",
.reg_offset_group = {13},
.group_size = 1
}, {
.reg_name = "RX_PKT_IN/OUT_ERR_CNT",
.reg_offset_group = {15, 14, 19, 18},
.group_size = 4
}, {
.reg_name = "TX_PKT_IN/OUT_ERR_CNT",
.reg_offset_group = {17, 16, 21, 20},
.group_size = 4
}, {
.reg_name = "ETS_TC_READY",
.reg_offset_group = {22},
.group_size = 1
}, {
.reg_name = "MIB_TX/RX_BAD_PKTS",
.reg_offset_group = {19, 18, 29, 28},
.group_size = 4
}, {
.reg_name = "MIB_TX/RX_GOOD_PKTS",
.reg_offset_group = {21, 20, 31, 30},
.group_size = 4
}, {
.reg_name = "MIB_TX/RX_TOTAL_PKTS",
.reg_offset_group = {23, 22, 33, 32},
.group_size = 4
}, {
.reg_name = "MIB_TX/RX_PAUSE_PKTS",
.reg_offset_group = {25, 24, 35, 34},
.group_size = 4
}, {
.reg_name = "MIB_TX_ERR_ALL_PKTS",
.reg_offset_group = {27, 26},
.group_size = 2
}, {
.reg_name = "MIB_RX_FCS_ERR_PKTS",
.reg_offset_group = {37, 36},
.group_size = 2
}, {
.reg_name = "IGU_EGU_AUTO_GATE_EN",
.reg_offset_group = {42},
.group_size = 1
}, {
.reg_name = "IGU_EGU_INT_SRC",
.reg_offset_group = {43},
.group_size = 1
}, {
.reg_name = "EGU_READY_NUM_CFG",
.reg_offset_group = {44},
.group_size = 1
}, {
.reg_name = "IGU_EGU_TNL_DFX",
.reg_offset_group = {45},
.group_size = 1
}, {
.reg_name = "TX_TNL_NOTE_PKT",
.reg_offset_group = {46},
.group_size = 1
}
};
static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = {
{
.cmd = HCLGE_OPC_DFX_SSU_REG_0,
.result_regs = hclge_ssu_reg_0_info,
.bd_num = HCLGE_BD_NUM_SSU_REG_0,
.result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info)
}, {
.cmd = HCLGE_OPC_DFX_SSU_REG_1,
.result_regs = hclge_ssu_reg_1_info,
.bd_num = HCLGE_BD_NUM_SSU_REG_1,
.result_regs_size = ARRAY_SIZE(hclge_ssu_reg_1_info)
}, {
.cmd = HCLGE_OPC_DFX_RPU_REG_0,
.result_regs = hclge_rpu_reg_0_info,
.bd_num = HCLGE_BD_NUM_RPU_REG_0,
.result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info),
.need_para = true
}, {
.cmd = HCLGE_OPC_DFX_RPU_REG_1,
.result_regs = hclge_rpu_reg_1_info,
.bd_num = HCLGE_BD_NUM_RPU_REG_1,
.result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info)
}, {
.cmd = HCLGE_OPC_DFX_IGU_EGU_REG,
.result_regs = hclge_igu_egu_reg_info,
.bd_num = HCLGE_BD_NUM_IGU_EGU_REG,
.result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info)
}, {
.cmd = HCLGE_OPC_DFX_GEN_REG,
.result_regs = hclge_gen_reg_info_tnl,
.bd_num = HCLGE_BD_NUM_GEN_REG,
.result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl),
.need_para = true
}, {
.cmd = HCLGE_OPC_DFX_GEN_REG,
.result_regs = hclge_gen_reg_info,
.bd_num = HCLGE_BD_NUM_GEN_REG,
.result_regs_size = ARRAY_SIZE(hclge_gen_reg_info)
}
};
/* Format and log each register group described by @reg_info using the
 * raw words in the descriptor chain @desc. Each line is
 * "<name>[suffix]: <hex words...>". Returns 0 on success or -ENOMEM.
 */
static int
hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc,
			 const struct hclge_mod_reg_info *reg_info, int size)
{
	int i, j, pos, actual_len;
	u8 offset, bd_idx, index;
	char *buf;

	buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		/* skip entries whose formatted line would not fit */
		actual_len = strlen(reg_info[i].reg_name) +
			     HCLGE_MOD_REG_EXTRA_LEN +
			     HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size;
		if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) {
			dev_info(dev, "length of reg(%s) is invalid, len=%d\n",
				 reg_info[i].reg_name, actual_len);
			continue;
		}

		pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s",
				reg_info[i].reg_name);
		/* per-tunnel registers carry the tunnel index as suffix,
		 * taken from word 0 of the first descriptor
		 */
		if (reg_info[i].has_suffix)
			pos += scnprintf(buf + pos,
					 HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u",
					 le32_to_cpu(desc->data[0]));
		pos += scnprintf(buf + pos,
				 HCLGE_MOD_REG_INFO_LEN_MAX - pos,
				 ":");

		for (j = 0; j < reg_info[i].group_size; j++) {
			/* offsets index 32-bit words across the chain:
			 * split into descriptor index and word index
			 */
			offset = reg_info[i].reg_offset_group[j];
			index = offset % HCLGE_DESC_DATA_LEN;
			bd_idx = offset / HCLGE_DESC_DATA_LEN;
			pos += scnprintf(buf + pos,
					 HCLGE_MOD_REG_INFO_LEN_MAX - pos,
					 " %08x",
					 le32_to_cpu(desc[bd_idx].data[index]));
		}
		buf[pos] = '\0';
		dev_info(dev, "%s\n", buf);
	}

	kfree(buf);
	return 0;
}
/* A DFX query opcode is supported unless it is the general-register
 * query on a device without general-register DFX capability.
 */
static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode,
					    struct hclge_dev *hdev)
{
	return !(opcode == HCLGE_OPC_DFX_GEN_REG &&
		 !hnae3_ae_dev_gen_reg_dfx_supported(hdev));
}
/* Send the cmdq query described by @msg to IMP once per parameter in
 * @loop_para (@loop_time entries) and log the decoded register values.
 * A failed send for one parameter is skipped, not fatal; allocation
 * failure aborts the whole query.
 */
static void
hclge_query_reg_info(struct hclge_dev *hdev,
		     struct hclge_mod_reg_common_msg *msg, u32 loop_time,
		     u32 *loop_para)
{
	int desc_len, i, ret;

	desc_len = msg->bd_num * sizeof(struct hclge_desc);
	msg->desc = kzalloc(desc_len, GFP_KERNEL);
	if (!msg->desc) {
		dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d",
			-ENOMEM);
		return;
	}

	for (i = 0; i < loop_time; i++) {
		ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para,
					 msg->bd_num, msg->cmd);
		loop_para++;	/* next parameter for the next iteration */
		if (ret)
			continue;
		ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc,
					       msg->result_regs,
					       msg->result_regs_size);
		if (ret)
			dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n",
				ret);
	}

	kfree(msg->desc);
}
/* Dump all SSU-related DFX register banks on an SSU/IGU-EGU module
 * error. Commands flagged need_para are queried once per tunnel
 * (parameters 1..tnl_num); the rest are queried once with parameter 0.
 */
static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
{
	u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
	struct hclge_mod_reg_common_msg msg;
	u8 i, j, num;
	u32 loop_time;

	num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
	for (i = 0; i < num; i++) {
		/* copy the table entry: hclge_query_reg_info() writes
		 * msg.desc, so a mutable local is needed
		 */
		msg = hclge_ssu_reg_common_msg[i];
		if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev))
			continue;
		loop_time = 1;
		loop_para[0] = 0;
		if (msg.need_para) {
			loop_time = hdev->ae_dev->dev_specs.tnl_num;
			for (j = 0; j < loop_time; j++)
				loop_para[j] = j + 1;
		}
		hclge_query_reg_info(hdev, &msg, loop_time, loop_para);
	}
}
static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
{
.module_id = MODULE_NONE,
@ -1210,7 +1630,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_GE"
}, {
.module_id = MODULE_IGU_EGU,
.msg = "MODULE_IGU_EGU"
.msg = "MODULE_IGU_EGU",
.query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_LGE,
.msg = "MODULE_LGE"
@ -1231,7 +1652,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_RTC"
}, {
.module_id = MODULE_SSU,
.msg = "MODULE_SSU"
.msg = "MODULE_SSU",
.query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_TM,
.msg = "MODULE_TM"
@ -1301,10 +1723,12 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
.msg = "tqp_int_ecc_error"
}, {
.type_id = PF_ABNORMAL_INT_ERROR,
.msg = "pf_abnormal_int_error"
.msg = "pf_abnormal_int_error",
.cause_by_vf = true
}, {
.type_id = MPF_ABNORMAL_INT_ERROR,
.msg = "mpf_abnormal_int_error"
.msg = "mpf_abnormal_int_error",
.cause_by_vf = true
}, {
.type_id = COMMON_ERROR,
.msg = "common_error"
@ -2759,8 +3183,8 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev)
hclge_handle_error_info_log(ae_dev);
}
static void
hclge_handle_error_type_reg_log(struct device *dev,
static bool
hclge_handle_error_type_reg_log(struct hclge_dev *hdev,
struct hclge_mod_err_info *mod_info,
struct hclge_type_reg_err_info *type_reg_info)
{
@ -2768,8 +3192,10 @@ hclge_handle_error_type_reg_log(struct device *dev,
#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
u8 mod_id, total_module, type_id, total_type, i, is_ras;
struct device *dev = &hdev->pdev->dev;
u8 index_module = MODULE_NONE;
u8 index_type = NONE_ERROR;
bool cause_by_vf = false;
mod_id = mod_info->mod_id;
type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
@ -2788,6 +3214,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
for (i = 0; i < total_type; i++) {
if (type_id == hclge_hw_type_id_st[i].type_id) {
index_type = i;
cause_by_vf = hclge_hw_type_id_st[i].cause_by_vf;
break;
}
}
@ -2805,6 +3232,11 @@ hclge_handle_error_type_reg_log(struct device *dev,
dev_err(dev, "reg_value:\n");
for (i = 0; i < type_reg_info->reg_num; i++)
dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
if (hclge_hw_module_id_st[index_module].query_reg_info)
hclge_hw_module_id_st[index_module].query_reg_info(hdev);
return cause_by_vf;
}
static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
@ -2815,6 +3247,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
struct device *dev = &hdev->pdev->dev;
struct hclge_mod_err_info *mod_info;
struct hclge_sum_err_info *sum_info;
bool cause_by_vf = false;
u8 mod_num, err_num, i;
u32 offset = 0;
@ -2843,12 +3276,16 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
type_reg_info = (struct hclge_type_reg_err_info *)
&buf[offset++];
hclge_handle_error_type_reg_log(dev, mod_info,
type_reg_info);
if (hclge_handle_error_type_reg_log(hdev, mod_info,
type_reg_info))
cause_by_vf = true;
offset += type_reg_info->reg_num;
}
}
if (hnae3_ae_dev_vf_fault_supported(hdev->ae_dev) && cause_by_vf)
set_bit(HNAE3_VF_EXP_RESET, &ae_dev->hw_err_reset_req);
}
static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
@ -2940,3 +3377,98 @@ err_desc:
out:
return ret;
}
static bool hclge_reset_vf_in_bitmap(struct hclge_dev *hdev,
unsigned long *bitmap)
{
struct hclge_vport *vport;
bool exist_set = false;
int func_id;
int ret;
func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM);
if (func_id == PF_VPORT_ID)
return false;
while (func_id != HCLGE_VPORT_NUM) {
vport = hclge_get_vf_vport(hdev,
func_id - HCLGE_VF_VPORT_START_NUM);
if (!vport) {
dev_err(&hdev->pdev->dev, "invalid func id(%d)\n",
func_id);
return false;
}
dev_info(&hdev->pdev->dev, "do function %d recovery.", func_id);
ret = hclge_reset_tqp(&vport->nic);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to reset tqp, ret = %d.", ret);
return false;
}
ret = hclge_inform_vf_reset(vport, HNAE3_VF_FUNC_RESET);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to reset func %d, ret = %d.",
func_id, ret);
return false;
}
exist_set = true;
clear_bit(func_id, bitmap);
func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM);
}
return exist_set;
}
static void hclge_get_vf_fault_bitmap(struct hclge_desc *desc,
unsigned long *bitmap)
{
#define HCLGE_FIR_FAULT_BYTES 24
#define HCLGE_SEC_FAULT_BYTES 8
u8 *buff;
BUILD_BUG_ON(HCLGE_FIR_FAULT_BYTES + HCLGE_SEC_FAULT_BYTES !=
BITS_TO_BYTES(HCLGE_VPORT_NUM));
memcpy(bitmap, desc[0].data, HCLGE_FIR_FAULT_BYTES);
buff = (u8 *)bitmap + HCLGE_FIR_FAULT_BYTES;
memcpy(buff, desc[1].data, HCLGE_SEC_FAULT_BYTES);
}
int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev)
{
unsigned long vf_fault_bitmap[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
struct hclge_desc desc[2];
bool cause_by_vf = false;
int ret;
if (!test_and_clear_bit(HNAE3_VF_EXP_RESET,
&hdev->ae_dev->hw_err_reset_req) ||
!hnae3_ae_dev_vf_fault_supported(hdev->ae_dev))
return 0;
hclge_comm_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_GET_QUEUE_ERR_VF,
true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_comm_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_GET_QUEUE_ERR_VF,
true);
ret = hclge_comm_cmd_send(&hdev->hw.hw, desc, 2);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get vf bitmap, ret = %d.\n", ret);
return ret;
}
hclge_get_vf_fault_bitmap(desc, vf_fault_bitmap);
cause_by_vf = hclge_reset_vf_in_bitmap(hdev, vf_fault_bitmap);
if (cause_by_vf)
hdev->ae_dev->hw_err_reset_req = 0;
return 0;
}

View File

@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
#include "hclge_debugfs.h"
#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
@ -115,6 +116,18 @@
#define HCLGE_REG_NUM_MAX 256
#define HCLGE_DESC_NO_DATA_LEN 8
#define HCLGE_BD_NUM_SSU_REG_0 10
#define HCLGE_BD_NUM_SSU_REG_1 15
#define HCLGE_BD_NUM_RPU_REG_0 1
#define HCLGE_BD_NUM_RPU_REG_1 2
#define HCLGE_BD_NUM_IGU_EGU_REG 9
#define HCLGE_BD_NUM_GEN_REG 8
#define HCLGE_MOD_REG_INFO_LEN_MAX 256
#define HCLGE_MOD_REG_EXTRA_LEN 11
#define HCLGE_MOD_REG_VALUE_LEN 9
#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6
#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@ -191,11 +204,13 @@ struct hclge_hw_error {
struct hclge_hw_module_id {
enum hclge_mod_name_list module_id;
const char *msg;
void (*query_reg_info)(struct hclge_dev *hdev);
};
struct hclge_hw_type_id {
enum hclge_err_type_list type_id;
const char *msg;
bool cause_by_vf; /* indicate the error may from vf exception */
};
struct hclge_sum_err_info {
@ -217,6 +232,28 @@ struct hclge_type_reg_err_info {
u32 hclge_reg[HCLGE_REG_NUM_MAX];
};
struct hclge_mod_reg_info {
const char *reg_name;
bool has_suffix; /* add suffix for register name */
/* the positions of reg values in hclge_desc.data */
u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE];
u8 group_size;
};
/* This structure defines cmdq used to query the hardware module debug
* regisgers.
*/
struct hclge_mod_reg_common_msg {
enum hclge_opcode_type cmd;
struct hclge_desc *desc;
u8 bd_num; /* the bd number of hclge_desc used */
bool need_para; /* whether this cmdq needs to add para */
/* the regs need to print */
const struct hclge_mod_reg_info *result_regs;
u16 result_regs_size;
};
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
@ -228,4 +265,5 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
int hclge_handle_mac_tnl(struct hclge_dev *hdev);
int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,162 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef __HCLGE_EXT_H
#define __HCLGE_EXT_H
#include <linux/types.h>
#define HCLGE_PHY_ID_FOR_RTL8211 0x001cc910
#define HCLGE_PHY_ID_FOR_MVL1512 0x01410dd0
#define HCLGE_PHY_ID_FOR_YT8521 0x00000110
#define HCLGE_PHY_ID_MASK 0xFFFFFFF0U
enum hclge_phy_page_region {
HCLGE_PHY_REGION_UTP_MII,
HCLGE_PHY_REGION_UTP_MMD,
HCLGE_PHY_REGION_UTP_LDS,
HCLGE_PHY_REGION_UTP_EXT,
HCLGE_PHY_REGION_SDS_MII,
HCLGE_PHY_REGION_SDS_EXT,
HCLGE_PHY_REGION_COM_REG,
HCLGE_PHY_REGION_MAX
};
enum hclge_phy_op_code {
PHY_OP_READ,
PHY_OP_WRITE
};
#define HCLGE_8211_PHY_INDIRECT_PAGE 0xa43
#define HCLGE_8211_PHY_INDIRECT_REG 0x1b
#define HCLGE_8211_PHY_INDIRECT_DATA 0x1c
#define HCLGE_8211_PHY_INDIRECT_RANGE1_S 0xDC0
#define HCLGE_8211_PHY_INDIRECT_RANGE1_E 0xDCF
#define HCLGE_8211_PHY_INDIRECT_RANGE2_S 0xDE0
#define HCLGE_8211_PHY_INDIRECT_RANGE2_E 0xDF0
#define HCLGE_8521_PHY_SMI_SDS_ADDR 0xA000
#define HCLGE_8521_PHY_LDS_MII_ADDR 0x100
#define HCLGE_NOTIFY_PARA_CFG_PKT_EN BIT(0)
#define HCLGE_NOTIFY_PARA_CFG_START_EN BIT(1)
#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_M GENMASK(5, 2)
#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_S 2
#define HCLGE_TORUS_MAC_ID_MASK 0x3
#define HCLGE_TOURS_TCX_MAP_TCY_INIT 0x1c6144
#define HCLGE_TOURS_TCX_MAP_TCY_NODE0_INIT 0x1c6141
#define HCLGE_VLAN_FE_NIC_INGRESS 0
#define HCLGE_VLAN_FE_ROCEE_INGRESS 2
#define HCLGE_TORUS_LPBK_DROP_EN 20
#define HCLGE_TC2VLANPRI_MAPPING_EN 19
#define HCLGE_LLDP_LAN_PAIR_EN 18
#define HCLGE_MC_BC_LAN_PAIR_EN 17
#define HCLGE_UC_LAN_PAIR_EN 16
#define HCLGE_TORUS_TC1_DROP_EN BIT(26)
#define HCLGE_TOURS_TCX_MAP_TCY_MASK 0x1c71c7
struct hclge_id_info_cmd {
__le32 chip_id;
__le32 mac_id;
__le32 io_die_id;
u8 rsv[12];
};
struct hclge_num_info_cmd {
__le32 chip_num;
__le32 io_die_num;
u8 rsv[16];
};
struct hclge_port_num_info_cmd {
__le32 port_num;
u8 rsv[20];
};
struct hclge_pfc_storm_para_cmd {
__le32 dir;
__le32 enable;
__le32 period_ms;
__le32 times;
__le32 recovery_period_ms;
__le32 rsv;
};
struct hclge_notify_pkt_param_cmd {
__le32 cfg;
__le32 ipg;
__le32 data[16];
u8 vld_cfg;
u8 vld_ipg;
u8 vld_data;
u8 rsv[21];
};
struct hclge_torus_cfg_cmd {
u8 rsv[4];
__le32 lan_port_pair;
__le32 lan_fwd_tc_cfg;
__le32 pause_time_out;
__le32 pause_time_out_en;
__le32 torus_en;
};
struct hclge_sfp_present_cmd {
__le32 sfp_present;
__le32 rsv[5];
};
struct hclge_sfp_enable_cmd {
__le32 sfp_enable;
__le32 rsv[5];
};
struct hclge_lamp_signal_cmd {
__le32 type;
__le32 status;
u8 error;
u8 locate;
u8 activity;
u8 rsv[13];
};
struct hclge_reset_fail_type_map {
enum hnae3_reset_type reset_type;
enum hnae3_event_type_custom custom_type;
};
typedef int (*hclge_priv_ops_fn)(struct hclge_dev *hdev, void *data,
size_t length);
/**
* nic_event_fn_t - nic event handler prototype
* @netdev: net device
* @hnae3_event_type_custom: nic device event type
*/
typedef void (*nic_event_fn_t) (struct net_device *netdev,
enum hnae3_event_type_custom);
/**
* nic_register_event - register for nic event handling
* @event_call: nic event handler
* return 0 - success , negative - fail
*/
int nic_register_event(nic_event_fn_t event_call);
/**
* nic_unregister_event - unregister for nic event handling
* return 0 - success , negative - fail
*/
int nic_unregister_event(void);
int hclge_ext_call_event(struct hclge_dev *hdev,
enum hnae3_event_type_custom event_t);
void hclge_ext_reset_end(struct hclge_dev *hdev, bool done);
int hclge_ext_ops_handle(struct hnae3_handle *handle, int opcode,
void *data, size_t length);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -12,7 +12,7 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
#include "hnae3_ext.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
@ -26,6 +26,8 @@
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@ -185,15 +187,25 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
#define HCLGE_SUPPORT_50G_R2_BIT BIT(3)
#define HCLGE_SUPPORT_100G_R4_BIT BIT(4)
/* to be compatible with exsit board */
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8)
#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
#define HCLGE_SUPPORT_200G_R4_BIT BIT(11)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
#define HCLGE_SUPPORT_50G_BITS \
(HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
#define HCLGE_SUPPORT_100G_BITS \
(HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
#define HCLGE_SUPPORT_200G_BITS \
(HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@ -214,6 +226,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_TBL_CHANGED,
HCLGE_STATE_FD_CLEAR_ALL,
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_HW_QB_ENABLE,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
HCLGE_STATE_FEC_STATS_UPDATING,
@ -246,6 +259,12 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
/* hilink version */
enum hclge_hilink_version {
HCLGE_HILINK_H32 = 0,
HCLGE_HILINK_H60 = 1,
};
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1
@ -369,6 +388,7 @@ struct hclge_tm_info {
enum hclge_fc_mode fc_mode;
u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
u8 pfc_en; /* PFC enabled or not for user priority */
u16 pause_time;
};
/* max number of mac statistics on each version */
@ -624,6 +644,7 @@ struct key_info {
#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)
#define HCLGE_FD_VXLAN_VNI_UNMASK GENMASK(31, 0)
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
@ -639,6 +660,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_ARFS_ACTIVE,
HCLGE_FD_EP_ACTIVE,
HCLGE_FD_TC_FLOWER_ACTIVE,
HCLGE_FD_QB_ACTIVE,
};
enum HCLGE_FD_PACKET_TYPE {
@ -717,6 +739,7 @@ struct hclge_fd_rule_tuples {
u32 l4_user_def;
u8 ip_tos;
u8 ip_proto;
u32 outer_tun_vni;
};
struct hclge_fd_rule {
@ -965,6 +988,8 @@ struct hclge_dev {
struct hclge_ptp *ptp;
struct devlink *devlink;
struct hclge_comm_rss_cfg rss_cfg;
struct hnae3_notify_pkt_param notify_param;
struct hnae3_torus_param torus_param;
};
/* VPort level vlan tag configuration for TX direction */
@ -994,6 +1019,7 @@ struct hclge_rx_vtag_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_QB_CHANGE,
HCLGE_VPORT_STATE_PROMISC_CHANGE,
HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
HCLGE_VPORT_STATE_INITED,
@ -1024,6 +1050,8 @@ struct hclge_vf_info {
u32 spoofchk;
u32 max_tx_rate;
u32 trusted;
u8 request_qb_en;
u8 qb_en;
u8 request_uc_en;
u8 request_mc_en;
u8 request_bc_en;
@ -1076,6 +1104,11 @@ struct hclge_mac_speed_map {
u32 speed_fw; /* speed defined in firmware */
};
struct hclge_link_mode_bmap {
u16 support_bit;
enum ethtool_link_mode_bit_indices link_mode;
};
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@ -1146,4 +1179,12 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
int hclge_mac_update_stats(struct hclge_dev *hdev);
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type);
int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version);
void hclge_reset_task_schedule(struct hclge_dev *hdev);
void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle);
void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable);
#endif

View File

@ -124,7 +124,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
{
__le16 msg_data;
u8 dest_vfid;
@ -811,7 +811,7 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
ae_dev->ops->reset_event(hdev->pdev, NULL);
hclge_reset_event(hdev->pdev, &hdev->vport[0].nic);
}
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
@ -831,6 +831,36 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
static void hclge_handle_vf_qb(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
struct hclge_dev *hdev = vport->back;
if (mbx_req->msg.subcode == HCLGE_MBX_QB_CHECK_CAPS) {
struct hnae3_handle *handle = &hdev->vport[0].nic;
resp_msg->data[0] = test_bit(HNAE3_PFLAG_FD_QB_ENABLE,
&handle->supported_pflags);
resp_msg->len = sizeof(u8);
} else if (mbx_req->msg.subcode == HCLGE_MBX_QB_ENABLE) {
vport->vf_info.request_qb_en = mbx_req->msg.data[0];
set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state);
} else if (mbx_req->msg.subcode == HCLGE_MBX_QB_GET_STATE) {
u16 msg_data = vport->vf_info.qb_en;
int ret;
ret = hclge_send_mbx_msg(vport, (u8 *)&msg_data,
sizeof(msg_data),
HCLGE_MBX_PUSH_QB_STATE,
vport->vport_id);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to inform qb state to vport %u, ret = %d\n",
vport->vport_id, ret);
}
}
static int
hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
{
@ -1040,6 +1070,12 @@ static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
return 0;
}
static int hclge_mbx_handle_vf_qb_handler(struct hclge_mbx_ops_param *param)
{
hclge_handle_vf_qb(param->vport, param->req, param->resp_msg);
return 0;
}
static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
[HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
[HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
@ -1064,6 +1100,7 @@ static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
[HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
[HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
[HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
[HCLGE_MBX_SET_QB] = hclge_mbx_handle_vf_qb_handler,
[HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
[HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
[HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
@ -1123,10 +1160,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
req->mbx_src_vfid > hdev->num_req_vfs)) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
req->msg.code);
"dropped invalid mailbox message, code = %u, vfid = %u\n",
req->msg.code, req->mbx_src_vfid);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;

View File

@ -185,8 +185,8 @@ int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
u8 pause_trans_gap, u16 pause_trans_time)
int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
u8 pause_trans_gap, u16 pause_trans_time)
{
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
@ -1498,7 +1498,7 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
return hclge_pause_param_cfg(hdev, mac->mac_addr,
HCLGE_DEFAULT_PAUSE_TRANS_GAP,
HCLGE_DEFAULT_PAUSE_TRANS_TIME);
hdev->tm_info.pause_time);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
@ -1692,6 +1692,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
/* fc_mode is HCLGE_FC_FULL on reset */
hdev->tm_info.fc_mode = HCLGE_FC_FULL;
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
hdev->tm_info.pause_time = HCLGE_DEFAULT_PAUSE_TRANS_TIME;
if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
hdev->tm_info.num_pg != 1)
@ -2143,3 +2144,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
return ret;
}
void hclge_reset_tc_config(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_knic_private_info *kinfo;
kinfo = &vport->nic.kinfo;
if (!kinfo->tc_info.mqprio_destroy)
return;
/* clear tc info, including mqprio_destroy and mqprio_active */
memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
hclge_tm_schd_info_update(hdev, 0);
hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
}

View File

@ -244,6 +244,8 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
u8 pause_trans_gap, u16 pause_trans_time);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
@ -277,4 +279,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
void hclge_reset_tc_config(struct hclge_dev *hdev);
#endif

View File

@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32))
#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send,
)
);
DECLARE_EVENT_CLASS(hclge_pf_cmd_template,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num),
TP_STRUCT__entry(__field(u16, opcode)
__field(u16, flag)
__field(u16, retval)
__field(u16, rsv)
__field(int, index)
__field(int, num)
__string(pciname, pci_name(hw->cmq.csq.pdev))
__array(u32, data, HCLGE_DESC_DATA_LEN)),
TP_fast_assign(int i;
__entry->opcode = le16_to_cpu(desc->opcode);
__entry->flag = le16_to_cpu(desc->flag);
__entry->retval = le16_to_cpu(desc->retval);
__entry->rsv = le16_to_cpu(desc->rsv);
__entry->index = index;
__entry->num = num;
__assign_str(pciname, pci_name(hw->cmq.csq.pdev));
for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
__entry->data[i] = le32_to_cpu(desc->data[i]);),
TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
__get_str(pciname), __entry->opcode,
__entry->index, __entry->num,
__entry->flag, __entry->retval, __entry->rsv,
__print_array(__entry->data,
HCLGE_DESC_DATA_LEN, sizeof(u32)))
);
DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num)
);
DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num)
);
DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template,
TP_PROTO(struct hclge_comm_hw *hw,
u32 *data,
int index,
int num),
TP_ARGS(hw, data, index, num),
TP_STRUCT__entry(__field(int, index)
__field(int, num)
__string(pciname, pci_name(hw->cmq.csq.pdev))
__array(u32, data, PF_DESC_LEN)),
TP_fast_assign(int i;
__entry->index = index;
__entry->num = num;
__assign_str(pciname, pci_name(hw->cmq.csq.pdev));
for (i = 0; i < PF_DESC_LEN; i++)
__entry->data[i] = le32_to_cpu(data[i]);
),
TP_printk("%s %d-%d data:%s",
__get_str(pciname),
__entry->index, __entry->num,
__print_array(__entry->data,
PF_DESC_LEN, sizeof(u32)))
);
DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_send,
TP_PROTO(struct hclge_comm_hw *hw,
u32 *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num));
DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get,
TP_PROTO(struct hclge_comm_hw *hw,
u32 *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num)
);
#endif /* _HCLGE_TRACE_H_ */
/* This must be outside ifdef _HCLGE_TRACE_H */

View File

@ -41,8 +41,9 @@ static int hclgevf_devlink_reload_down(struct devlink *devlink,
struct pci_dev *pdev = hdev->pdev;
int ret;
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
dev_err(&pdev->dev, "reset is handling\n");
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state)) {
dev_err(&pdev->dev, "reset is handling or driver removed\n");
return -EBUSY;
}

View File

@ -11,6 +11,7 @@
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
#include "hclgevf_trace.h"
#define HCLGEVF_NAME "hclgevf"
@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_special)
{
int i;
trace_hclge_vf_cmd_send(hw, desc, 0, num);
if (is_special)
return;
for (i = 1; i < num; i++)
trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
}
static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_special)
{
int i;
if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
return;
trace_hclge_vf_cmd_get(hw, desc, 0, num);
if (is_special)
return;
for (i = 1; i < num; i++)
trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
}
static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
.trace_cmd_send = hclgevf_trace_cmd_send,
.trace_cmd_get = hclgevf_trace_cmd_get,
};
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
@ -354,6 +391,74 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
return 0;
}
static void hclgevf_update_fd_qb_state(struct hclgevf_dev *hdev)
{
struct hnae3_handle *handle = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
int ret;
if (!hdev->qb_cfg.pf_support_qb ||
!test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags))
return;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB,
HCLGE_MBX_QB_GET_STATE);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev, "failed to get qb state, ret = %d",
ret);
}
static void hclgevf_get_pf_qb_caps(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int ret;
if (!test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps))
return;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB,
HCLGE_MBX_QB_CHECK_CAPS);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
sizeof(resp_msg));
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get qb caps from PF, ret = %d", ret);
return;
}
hdev->qb_cfg.pf_support_qb = resp_msg > 0;
}
static void hclgevf_set_fd_qb(struct hnae3_handle *handle)
{
#define HCLGEVF_QB_MBX_STATE_OFFSET 0
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB,
HCLGE_MBX_QB_ENABLE);
send_msg.data[HCLGEVF_QB_MBX_STATE_OFFSET] =
test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags) ? 1 : 0;
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
sizeof(resp_msg));
if (ret)
dev_err(&hdev->pdev->dev, "failed to set qb state, ret = %d",
ret);
}
static bool hclgevf_query_fd_qb_state(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return hdev->qb_cfg.hw_qb_en;
}
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
struct hclge_vf_to_pf_msg send_msg;
@ -1901,6 +2006,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
hclgevf_sync_promisc_mode(hdev);
hclgevf_update_fd_qb_state(hdev);
hdev->last_serv_processed = jiffies;
out:
@ -2796,6 +2903,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
}
hclgevf_arq_init(hdev);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@ -2854,6 +2962,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_cmd_queue_init;
hclgevf_arq_init(hdev);
hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@ -2939,6 +3049,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
hclgevf_get_pf_qb_caps(hdev);
hclgevf_init_rxd_adv_layout(hdev);
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
@ -3323,6 +3435,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_promisc_mode = hclgevf_set_promisc_mode,
.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
.get_cmdq_stat = hclgevf_get_cmdq_stat,
.request_flush_qb_config = hclgevf_set_fd_qb,
.query_fd_qb_state = hclgevf_query_fd_qb_state,
};
static struct hnae3_ae_algo ae_algovf = {

View File

@ -206,6 +206,11 @@ struct hclgevf_mac_table_cfg {
struct list_head mc_mac_list;
};
struct hclgevf_qb_cfg {
bool pf_support_qb;
bool hw_qb_en;
};
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@ -274,6 +279,7 @@ struct hclgevf_dev {
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
struct hclgevf_qb_cfg qb_cfg;
struct devlink *devlink;
};

View File

@ -271,6 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_MODE:
case HCLGE_MBX_PUSH_VLAN_INFO:
case HCLGE_MBX_PUSH_PROMISC_INFO:
case HCLGE_MBX_PUSH_QB_STATE:
hclgevf_handle_mbx_msg(hdev, req);
break;
default:
@ -296,6 +297,19 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
"Promisc mode is closed by host for being untrusted.\n");
}
static void hclgevf_parse_qb_info(struct hclgevf_dev *hdev, u16 qb_state)
{
#define HCLGEVF_HW_QB_ON 1
#define HCLGEVF_HW_QB_OFF 0
if (qb_state > HCLGEVF_HW_QB_ON) {
dev_warn(&hdev->pdev->dev, "Invalid state, ignored.\n");
return;
}
hdev->qb_cfg.hw_qb_en = qb_state > HCLGEVF_HW_QB_OFF;
}
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
struct hclge_mbx_port_base_vlan *vlan_info;
@ -374,6 +388,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_PUSH_PROMISC_INFO:
hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
break;
case HCLGE_MBX_PUSH_QB_STATE:
hclgevf_parse_qb_info(hdev, msg_q[1]);
break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%u) message from arq\n",

View File

@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send,
)
);
DECLARE_EVENT_CLASS(hclge_vf_cmd_template,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num),
TP_STRUCT__entry(__field(u16, opcode)
__field(u16, flag)
__field(u16, retval)
__field(u16, rsv)
__field(int, index)
__field(int, num)
__string(pciname, pci_name(hw->cmq.csq.pdev))
__array(u32, data, HCLGE_DESC_DATA_LEN)),
TP_fast_assign(int i;
__entry->opcode = le16_to_cpu(desc->opcode);
__entry->flag = le16_to_cpu(desc->flag);
__entry->retval = le16_to_cpu(desc->retval);
__entry->rsv = le16_to_cpu(desc->rsv);
__entry->index = index;
__entry->num = num;
__assign_str(pciname, pci_name(hw->cmq.csq.pdev));
for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
__entry->data[i] = le32_to_cpu(desc->data[i]);),
TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
__get_str(pciname), __entry->opcode,
__entry->index, __entry->num,
__entry->flag, __entry->retval, __entry->rsv,
__print_array(__entry->data,
HCLGE_DESC_DATA_LEN, sizeof(u32)))
);
DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num));
DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get,
TP_PROTO(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int index,
int num),
TP_ARGS(hw, desc, index, num));
#endif /* _HCLGEVF_TRACE_H_ */
/* This must be outside ifdef _HCLGEVF_TRACE_H */

View File

@ -211,4 +211,15 @@ config PTP_DFL_TOD
To compile this driver as a module, choose M here: the module
will be called ptp_dfl_tod.
config PTP_HISI
tristate "HiSilicon PTP sync platform driver"
help
PTP sync driver work on multichip system, eliminates the bus latency
between multichip, and provide a higher precision clock source. But
the clock source of PTP sync device is from the RTC of HNS3 ethernet
device, so, if you want the PTP sync device works, you must enable
HNS3 driver also.
If unsure, say N.
endmenu

View File

@ -20,3 +20,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o
obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
obj-$(CONFIG_PTP_1588_CLOCK_OCP) += ptp_ocp.o
obj-$(CONFIG_PTP_DFL_TOD) += ptp_dfl_tod.o
obj-$(CONFIG_PTP_HISI) += ptp_hisi.o

1027
drivers/ptp/ptp_hisi.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -284,6 +284,18 @@ enum {
SFF8024_ID_QSFP_8438 = 0x0c,
SFF8024_ID_QSFP_8436_8636 = 0x0d,
SFF8024_ID_QSFP28_8636 = 0x11,
SFF8024_ID_CXP2 = 0x12,
SFF8024_ID_CDFP = 0x13,
SFF8024_ID_HD4X_FANOUT = 0x14,
SFF8024_ID_HD8X_FANOUT = 0x15,
SFF8024_ID_CDFP_S3 = 0x16,
SFF8024_ID_MICRO_QSFP = 0x17,
SFF8024_ID_QSFP_DD = 0x18,
SFF8024_ID_OSFP = 0x19,
SFF8024_ID_DSFP = 0x1B,
SFF8024_ID_QSFP_PLUS_CMIS = 0x1E,
SFF8024_ID_SFP_DD_CMIS = 0x1F,
SFF8024_ID_SFP_PLUS_CMIS = 0x20,
SFF8024_ENCODING_UNSPEC = 0x00,
SFF8024_ENCODING_8B10B = 0x01,

View File

@ -1017,6 +1017,28 @@ struct ethtool_usrip4_spec {
__u8 proto;
};
/**
* struct ethtool_vxlan4_spec - general flow specification for VxLAN IPv4
* @vni: VxLAN network identifier
* @dst: Inner destination eth addr
* @src: Inner source eth addr
* @eth_type: Inner ethernet type
* @tos: Inner type-of-service
* @l4_proto: Inner transport protocol number
* @ip4src: Inner source host
* @ip4dst: Inner destination host
*/
struct ethtool_vxlan4_spec {
__be32 vni;
__u8 dst[ETH_ALEN];
__u8 src[ETH_ALEN];
__be16 eth_type;
__u8 tos;
__u8 l4_proto;
__be32 ip4src;
__be32 ip4dst;
};
/**
* struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc.
* @ip6src: Source host
@ -1067,6 +1089,28 @@ struct ethtool_usrip6_spec {
__u8 l4_proto;
};
/**
* struct ethtool_vxlan6_spec - general flow specification for VxLAN IPv6
* @vni: VxLAN network identifier
* @dst: Inner destination eth addr
* @src: Inner source eth addr
* @eth_type: Inner ethernet type
* @tclass: Inner traffic Class
* @l4_proto: Inner transport protocol number
* @ip6src: Inner source host
* @ip6dst: Inner destination host
*/
struct ethtool_vxlan6_spec {
__be32 vni;
__u8 dst[ETH_ALEN];
__u8 src[ETH_ALEN];
__be16 eth_type;
__u8 tclass;
__u8 l4_proto;
__be32 ip6src[4];
__be32 ip6dst[4];
};
union ethtool_flow_union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
@ -1074,12 +1118,14 @@ union ethtool_flow_union {
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
struct ethtool_vxlan4_spec vxlan_ip4_spec;
struct ethtool_tcpip6_spec tcp_ip6_spec;
struct ethtool_tcpip6_spec udp_ip6_spec;
struct ethtool_tcpip6_spec sctp_ip6_spec;
struct ethtool_ah_espip6_spec ah_ip6_spec;
struct ethtool_ah_espip6_spec esp_ip6_spec;
struct ethtool_usrip6_spec usr_ip6_spec;
struct ethtool_vxlan6_spec vxlan_ip6_spec;
struct ethhdr ether_spec;
__u8 hdata[52];
};
@ -2011,6 +2057,8 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
#define VXLAN_V4_FLOW 0x43 /* spec only (vxlan_ip4_spec) */
#define VXLAN_V6_FLOW 0x44 /* spec only (vxlan_ip6_spec) */
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000