Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

This patch-set includes code optimizations, cleanups and bugfixes for
the HNS3 ethernet controller driver.

[patch 1/12 - 6/12] add some code optimizations and bugfixes for RAS
and MSI-X HW error handling.

[patch 7/12] fixes a loading issue.

[patch 8/12 - 11/12] add some bugfixes.

[patch 12/12] adds some cleanups, which do not change the logic of the code.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-06-14 19:26:16 -07:00
commit 26e392ca9b
15 changed files with 494 additions and 326 deletions


@ -26,7 +26,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type)
}
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited)
struct hnae3_ae_dev *ae_dev,
unsigned int inited)
{
if (!client || !ae_dev)
return;


@ -214,6 +214,7 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
u8 override_pci_need_reset; /* fix to stop multiple reset happening */
unsigned long hw_err_reset_req;
enum hnae3_reset_type reset_type;
void *priv;
};
@ -459,6 +460,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
@ -490,7 +493,7 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@ -642,5 +645,6 @@ void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited);
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
#endif
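
The two additions above are the glue for the rest of the series: error handlers record the resets they need in the ae_dev->hw_err_reset_req bitmap, and the common layer asks the hardware driver to resolve that bitmap through the new .get_reset_level op. A minimal standalone C sketch of the dispatch pattern (the struct and stub names below are simplified stand-ins, not the real driver types):

```c
#include <stdio.h>

struct ae_dev;

struct ae_ops {
	int (*get_reset_level)(struct ae_dev *ae, unsigned long *addr);
	void (*reset_event)(struct ae_dev *ae);
};

struct ae_dev {
	const struct ae_ops *ops;
	unsigned long hw_err_reset_req;	/* bitmap filled by error handlers */
};

/* stand-in for the hclge implementation */
static int stub_get_reset_level(struct ae_dev *ae, unsigned long *addr)
{
	(void)ae;
	return *addr ? 1 : 0;	/* nonzero: some reset level was requested */
}

static void stub_reset_event(struct ae_dev *ae)
{
	(void)ae;
	printf("reset requested\n");
}

static const struct ae_ops stub_ops = {
	.get_reset_level = stub_get_reset_level,
	.reset_event	 = stub_reset_event,
};

int main(void)
{
	struct ae_dev ae = { .ops = &stub_ops, .hw_err_reset_req = 1UL << 1 };

	/* common layer: resolve recorded errors, then trigger the reset */
	if (ae.ops->get_reset_level(&ae, &ae.hw_err_reset_req))
		ae.ops->reset_event(&ae);
	return 0;
}
```

Keeping the resolution behind an ops pointer lets the common hns3 code stay agnostic of whether a PF (hclge) or VF (hclgevf) backend sits underneath.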


@ -11,7 +11,8 @@
static struct dentry *hns3_dbgfs_root;
static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
@ -155,7 +156,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
return 0;
}
static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;


@ -29,7 +29,7 @@
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
@ -143,6 +143,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq);
hns3_nic_uninit_irq(priv);
return ret;
}
@ -487,7 +488,12 @@ static void hns3_nic_net_down(struct net_device *netdev)
/* free irq resources */
hns3_nic_uninit_irq(priv);
hns3_clear_all_ring(priv->ae_handle);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during the reset process, because the driver may not be able
* to disable the ring through firmware when the netdev goes down.
*/
if (!hns3_nic_resetting(netdev))
hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@ -1005,7 +1011,8 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, int frag_end, enum hns_desc_type type)
unsigned int size, int frag_end,
enum hns_desc_type type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
@ -1509,12 +1516,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
struct hnae3_knic_private_info *kinfo;
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
struct hnae3_handle *h;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@ -1526,6 +1533,9 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;
h = hns3_get_handle(netdev);
kinfo = &h->kinfo;
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@ -1930,17 +1940,22 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
const struct hnae3_ae_ops *ops = ae_dev->ops;
enum hnae3_reset_type reset_type;
struct device *dev = &pdev->dev;
dev_info(dev, "requesting reset due to PCI error\n");
if (!ae_dev || !ae_dev->ops)
return PCI_ERS_RESULT_NONE;
/* request the reset */
if (ae_dev->ops->reset_event) {
if (!ae_dev->override_pci_need_reset)
ae_dev->ops->reset_event(pdev, NULL);
if (ops->reset_event) {
if (!ae_dev->override_pci_need_reset) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
ops->set_default_reset_request(ae_dev, reset_type);
dev_info(dev, "requesting reset due to PCI error\n");
ops->reset_event(pdev, NULL);
}
return PCI_ERS_RESULT_RECOVERED;
}
@ -3410,7 +3425,7 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
int ring_type)
unsigned int ring_type)
{
struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
@ -3905,7 +3920,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
hns3_force_clear_all_ring(handle);
hns3_nic_uninit_vector_data(priv);
@ -4074,7 +4089,7 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
}
}
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
static void hns3_force_clear_all_ring(struct hnae3_handle *h)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
@ -4082,6 +4097,9 @@ static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
u32 i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
ring = priv->ring_data[i].ring;
hns3_clear_tx_ring(ring);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
hns3_force_clear_rx_ring(ring);
}
@ -4312,7 +4330,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
hns3_force_clear_all_rx_ring(handle);
hns3_clear_all_ring(handle);
hns3_force_clear_all_ring(handle);
hns3_nic_uninit_vector_data(priv);


@ -417,7 +417,7 @@ struct hns3_enet_ring {
*/
int next_to_clean;
int pull_len; /* head length for current packet */
u32 pull_len; /* head length for current packet */
u32 frag_num;
unsigned char *va; /* first buffer address for current packet */


@ -250,11 +250,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
skb_get(skb);
tx_ret = hns3_nic_net_xmit(skb, ndev);
if (tx_ret == NETDEV_TX_OK)
if (tx_ret == NETDEV_TX_OK) {
good_cnt++;
else
} else {
kfree_skb(skb);
netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
tx_ret);
}
}
if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;


@ -61,8 +61,8 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
struct hclge_dbg_dfx_message *dfx_message,
char *cmd_buf, int msg_num, int offset,
enum hclge_opcode_type cmd)
const char *cmd_buf, int msg_num,
int offset, enum hclge_opcode_type cmd)
{
#define BD_DATA_NUM 6
@ -111,7 +111,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
@ -211,7 +211,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
}
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
int msg_num;
@ -541,7 +541,8 @@ err_tm_cmd_send:
cmd, ret);
}
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
const char *cmd_buf)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
@ -984,7 +985,8 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
*/
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
@ -1063,7 +1065,7 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
}
}
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;


@ -631,29 +631,20 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts)
static void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts, unsigned long *reset_requests)
{
enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
bool need_reset = false;
while (err->msg) {
if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
if (err->reset_level != HNAE3_NONE_RESET &&
err->reset_level >= reset_level) {
reset_level = err->reset_level;
need_reset = true;
}
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
set_bit(err->reset_level, reset_requests);
}
err++;
}
if (need_reset)
return reset_level;
else
return HNAE3_NONE_RESET;
}
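
The refactor above turns hclge_log_error() from "compute and return one reset level" into "record every needed level in the caller's bitmap", so a caller can scan many status registers and defer the reset decision. A compilable toy version of the walk, with made-up table entries:

```c
#include <stdio.h>

struct hw_error {
	unsigned int int_msk;	/* bit(s) in the status register */
	const char *msg;
	int reset_level;	/* 0: log only, no reset needed */
};

/* made-up table entries; the driver has one table per status register */
static const struct hw_error demo_tbl[] = {
	{ 0x1, "ecc_1bit_err", 0 },
	{ 0x2, "ecc_mbit_err", 2 },
	{ 0 }			/* sentinel: msg == NULL ends the walk */
};

static void log_error(const char *reg, const struct hw_error *err,
		      unsigned int sts, unsigned long *reset_req)
{
	for (; err->msg; err++) {
		if (!(err->int_msk & sts))
			continue;
		printf("%s %s found [error status=0x%x]\n", reg, err->msg, sts);
		if (err->reset_level)	/* record, don't decide */
			*reset_req |= 1UL << err->reset_level;
	}
}

int main(void)
{
	unsigned long reset_req = 0;

	log_error("DEMO_INT_STS", demo_tbl, 0x3, &reset_req);
	printf("requested levels bitmap: 0x%lx\n", reset_req);
	return 0;
}
```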
/* hclge_cmd_query_error: read the error information
@ -1069,13 +1060,6 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
do { \
if (ae_dev->ops->set_default_reset_request) \
ae_dev->ops->set_default_reset_request(ae_dev, \
reset_type); \
} while (0)
/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
* @hdev: pointer to struct hclge_dev
* @desc: descriptor for describing the command
@ -1089,7 +1073,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@ -1106,95 +1089,74 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
&hclge_imp_tcm_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
&hclge_imp_tcm_ecc_int[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
if (status) {
reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
&hclge_cmdq_nic_mem_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
&hclge_cmdq_nic_mem_ecc_int[0], status,
&ae_dev->hw_err_reset_req);
if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
if ((le32_to_cpu(desc[0].data[2])) & BIT(0))
dev_warn(dev, "imp_rd_data_poison_err found\n");
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
}
status = le32_to_cpu(desc[0].data[3]);
if (status) {
reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
&hclge_tqp_int_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
&hclge_tqp_int_ecc_int[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[4]);
if (status) {
reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
&hclge_msix_sram_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "MSIX_ECC_INT_STS",
&hclge_msix_sram_ecc_int[0], status,
&ae_dev->hw_err_reset_req);
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
&hclge_ssu_mem_ecc_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
&hclge_ssu_mem_ecc_err_int[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
&hclge_ssu_com_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "SSU_COMMON_ERR_INT",
&hclge_ssu_com_err_int[0], status,
&ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "IGU_INT_STS",
&hclge_igu_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "IGU_INT_STS",
&hclge_igu_int[0], status,
&ae_dev->hw_err_reset_req);
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
reset_level =
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
&hclge_ppp_mpf_abnormal_int_st1[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
&hclge_ppp_mpf_abnormal_int_st1[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
if (status) {
reset_level =
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
&hclge_ppp_mpf_abnormal_int_st3[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
&hclge_ppp_mpf_abnormal_int_st3[0], status,
&ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@ -1202,61 +1164,50 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 2));
if (status) {
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
if (status) {
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
&hclge_ppu_mpf_abnormal_int_st3[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
&hclge_ppu_mpf_abnormal_int_st3[0], status,
&ae_dev->hw_err_reset_req);
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
if (status) {
reset_level = hclge_log_error(dev, "TM_SCH_RINT",
&hclge_tm_sch_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "TM_SCH_RINT",
&hclge_tm_sch_rint[0], status,
&ae_dev->hw_err_reset_req);
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
&hclge_qcn_fifo_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "QCN_FIFO_RINT",
&hclge_qcn_fifo_rint[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
&hclge_qcn_ecc_rint[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "QCN_ECC_RINT",
&hclge_qcn_ecc_rint[0], status,
&ae_dev->hw_err_reset_req);
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
&hclge_ncsi_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "NCSI_ECC_INT_RPT",
&hclge_ncsi_err_int[0], status,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@ -1281,7 +1232,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
@ -1297,48 +1247,38 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_err_int[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
if (status) {
reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
&hclge_ssu_fifo_overflow_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
&hclge_ssu_fifo_overflow_int[0], status,
&ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[2]);
if (status) {
reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
&hclge_ssu_ets_tcg_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "SSU_ETS_TCG_INT",
&hclge_ssu_ets_tcg_int[0], status,
&ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
if (status) {
reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0], status,
&ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
if (status) {
reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if (status)
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status,
&ae_dev->hw_err_reset_req);
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@ -1597,7 +1537,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
if (reset_type != HNAE3_NONE_RESET)
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
set_bit(reset_type, &ae_dev->hw_err_reset_req);
}
static const struct hclge_hw_blk hw_blk[] = {
@ -1655,8 +1595,18 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
struct device *dev = &hdev->pdev->dev;
u32 status;
if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
dev_err(dev,
"Can't recover - RAS error reported during dev init\n");
return PCI_ERS_RESULT_NONE;
}
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
ae_dev->hw_err_reset_req = 0;
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev,
@ -1676,8 +1626,9 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_rocee_ras_error(ae_dev);
}
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
if ((status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) &&
ae_dev->hw_err_reset_req) {
ae_dev->override_pci_need_reset = 0;
return PCI_ERS_RESULT_NEED_RESET;
}
@ -1686,6 +1637,21 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
return PCI_ERS_RESULT_RECOVERED;
}
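
Net effect of the hunk above: hw_err_reset_req is zeroed before the RAS status is parsed, and PCI_ERS_RESULT_NEED_RESET is returned only if handling the errors actually recorded a reset request. A small standalone sketch of that gating (mask values invented for illustration):

```c
#include <stdio.h>

#define RAS_NFE_MASK	0x1u	/* invented values, for illustration only */
#define RAS_ROCEE_MASK	0x2u

int main(void)
{
	unsigned int status = RAS_NFE_MASK;	/* pretend HW status read */
	unsigned long hw_err_reset_req;

	/* start from a clean slate before parsing the error sources */
	hw_err_reset_req = 0;

	/* ...RAS handlers run here and may record reset requests... */
	hw_err_reset_req |= 1UL << 1;		/* e.g. a func-reset request */

	if ((status & (RAS_NFE_MASK | RAS_ROCEE_MASK)) && hw_err_reset_req)
		printf("PCI_ERS_RESULT_NEED_RESET\n");
	else
		printf("PCI_ERS_RESULT_RECOVERED\n");
	return 0;
}
```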
static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
struct hclge_desc *desc, bool is_mpf,
u32 bd_num)
{
if (is_mpf)
desc[0].opcode =
cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT);
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
/* hclge_query_8bd_info: query information about over_8bd_nfe_err
* @hdev: pointer to struct hclge_dev
* @vf_id: Index of the virtual function with error
@ -1761,16 +1727,128 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
}
}
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors
* @hdev: pointer to struct hclge_dev
* @desc: descriptor for describing the command
* @mpf_bd_num: number of extended command structures
* @reset_requests: record of the reset level that we need
*
* This function handles all the main PF MSI-X errors in the hw registers
* using the command queue.
*/
static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
int mpf_bd_num,
unsigned long *reset_requests)
{
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
int ret;
/* query all main PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
true);
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
return ret;
}
/* log MAC errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
if (status)
hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
&hclge_mac_afifo_tnl_int[0], status,
reset_requests);
/* log PPU(RCB) MPF errors */
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
status);
/* clear all main PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
if (ret)
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
return ret;
}
/* hclge_handle_pf_msix_error: handle all PF MSI-X errors
* @hdev: pointer to struct hclge_dev
* @desc: descriptor for describing the command
* @mpf_bd_num: number of extended command structures
* @reset_requests: record of the reset level that we need
*
* This function handles all the PF MSI-X errors in the hw registers using
* the command queue.
*/
static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
int pf_bd_num,
unsigned long *reset_requests)
{
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
int ret;
/* query all PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
true);
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
return ret;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
if (status)
hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_pf_int[0],
status, reset_requests);
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
if (status)
hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
&hclge_ppp_pf_abnormal_int[0],
status, reset_requests);
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
if (status)
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
&hclge_ppu_pf_abnormal_int[0],
status, reset_requests);
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
if (status)
hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
if (ret)
dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
return ret;
}
static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
enum hnae3_reset_type reset_level;
struct hclge_desc desc_bd;
struct hclge_desc *desc;
__le32 *desc_data;
u32 status;
int ret;
@ -1792,98 +1870,15 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (!desc)
goto out;
/* query all main PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
true);
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
ret);
ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
reset_requests);
if (ret)
goto msi_error;
}
/* log MAC errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
if (status) {
reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
&hclge_mac_afifo_tnl_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) MPF errors */
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) {
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0],
status);
set_bit(reset_level, reset_requests);
}
/* clear all main PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
ret);
goto msi_error;
}
/* query all PF MSIx errors */
memset(desc, 0, bd_num * sizeof(struct hclge_desc));
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
true);
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n",
ret);
ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests);
if (ret)
goto msi_error;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
if (status) {
reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_pf_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
if (status) {
reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
&hclge_ppp_pf_abnormal_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
if (status) {
reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
&hclge_ppu_pf_abnormal_int[0],
status);
set_bit(reset_level, reset_requests);
}
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
if (status)
hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
ret);
}
/* query and clear mac tnl interruptions */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
@ -1914,3 +1909,79 @@ msi_error:
out:
return ret;
}
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
struct device *dev = &hdev->pdev->dev;
if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
dev_err(dev,
"Can't handle - MSIx error reported during dev init\n");
return 0;
}
return hclge_handle_all_hw_msix_error(hdev, reset_requests);
}
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_DESC_NO_DATA_LEN 8
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc desc_bd;
struct hclge_desc *desc;
u32 status;
int ret;
ae_dev->hw_err_reset_req = 0;
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
/* query the number of bds for the MSIx int status */
hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
true);
ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
if (ret) {
dev_err(dev, "fail(%d) to query msix int status bd num\n",
ret);
return;
}
mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
pf_bd_num = le32_to_cpu(desc_bd.data[1]);
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return;
/* Clear HNS hw errors reported through msix */
memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) -
HCLGE_DESC_NO_DATA_LEN);
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
if (ret) {
dev_err(dev, "fail(%d) to clear mpf msix int during init\n",
ret);
goto msi_error;
}
memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) -
HCLGE_DESC_NO_DATA_LEN);
ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
if (ret) {
dev_err(dev, "fail(%d) to clear pf msix int during init\n",
ret);
goto msi_error;
}
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
msi_error:
kfree(desc);
}
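
hclge_handle_all_hns_hw_errors() above follows the usual command-queue rhythm: query how many buffer descriptors (BDs) the MSI-X status occupies, size one buffer for the larger of the MPF/PF answers, then issue the clear commands. A standalone sketch of that query-then-allocate pattern (the query function here is a stand-in, not a real firmware call):

```c
#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned int data[6]; };

/* stand-in: pretend firmware reports how many BDs each query needs */
static int query_msix_bd_num(unsigned int *mpf, unsigned int *pf)
{
	*mpf = 10;
	*pf = 4;
	return 0;
}

int main(void)
{
	unsigned int mpf_bd_num, pf_bd_num, bd_num;
	struct desc *desc;

	if (query_msix_bd_num(&mpf_bd_num, &pf_bd_num))
		return 1;

	/* one buffer sized for the larger query (max_t() in the driver) */
	bd_num = mpf_bd_num > pf_bd_num ? mpf_bd_num : pf_bd_num;
	desc = calloc(bd_num, sizeof(*desc));
	if (!desc)
		return 1;

	printf("allocated %u descriptors\n", bd_num);
	/* the driver then issues the MPF and PF clear commands with desc */
	free(desc);
	return 0;
}
```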


@ -81,7 +81,7 @@
#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
@ -123,6 +123,7 @@ struct hclge_hw_error {
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
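
For reference, the mask change above narrows the match from bits 29..28 down to bit 29 only. Userspace equivalents of the two kernel macros, assuming a 64-bit unsigned long:

```c
#include <stdio.h>

#define BIT(n)		(1UL << (n))
/* assumes a 64-bit unsigned long, as on arm64/x86-64 kernels */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	printf("GENMASK(29, 28) = 0x%lx\n", GENMASK(29, 28)); /* 0x30000000 */
	printf("BIT(29)         = 0x%lx\n", BIT(29));	      /* 0x20000000 */
	return 0;
}
```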


@ -27,7 +27,7 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
@ -41,6 +41,8 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static struct hnae3_ae_algo ae_algo;
@ -534,7 +536,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
static u64 *hclge_comm_get_stats(const void *comm_stats,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
@ -1074,7 +1076,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
int i;
unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
@ -1136,7 +1138,8 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
struct hclge_cfg_param_cmd *req;
int i, ret;
unsigned int i;
int ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
u32 offset = 0;
@ -1202,7 +1205,8 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
int ret, i;
unsigned int i;
int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret) {
@ -1265,8 +1269,8 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
int tso_mss_max)
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
unsigned int tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
@ -1578,7 +1582,8 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
int i, cnt = 0;
unsigned int i;
u32 cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
@ -1591,7 +1596,8 @@ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
int i, cnt = 0;
unsigned int i;
int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@ -1608,7 +1614,8 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
int i, cnt = 0;
unsigned int i;
int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@ -1738,7 +1745,7 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
int i;
unsigned int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
@ -1779,9 +1786,10 @@ static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
unsigned int mask = BIT((unsigned int)i);
if (hdev->hw_tc_map & BIT(i) &&
!(hdev->tm_info.hw_pfc_map & BIT(i))) {
if (hdev->hw_tc_map & mask &&
!(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the no pfc TC private buffer */
priv->wl.low = 0;
priv->wl.high = 0;
@ -1808,9 +1816,10 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
unsigned int mask = BIT((unsigned int)i);
if (hdev->hw_tc_map & BIT(i) &&
hdev->tm_info.hw_pfc_map & BIT(i)) {
if (hdev->hw_tc_map & mask &&
hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
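
The hoisted mask in the two hunks above is part of the series' unsigned-type cleanup: BIT(i) shifts 1UL by a count that was a signed loop index, which static checkers flag, and the mask was previously computed twice per iteration. A tiny compilable illustration:

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	int i;

	for (i = 7; i >= 0; i--) {
		/* one shift per iteration, with an unsigned shift count */
		unsigned long mask = BIT((unsigned int)i);

		printf("tc %d -> mask 0x%lx\n", i, mask);
	}
	return 0;
}
```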
@ -2449,7 +2458,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
int mac_state;
unsigned int mac_state;
int link_stat;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
@ -2744,8 +2753,8 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
u32 clearval = 0;
u32 event_cause;
u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
@ -3066,10 +3075,11 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
}
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
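
hclge_get_reset_level() now takes the ae_dev (so the common layer can call it through the new op) and resolves the pending-reset bitmap to a single level, most severe first, clearing the bit it consumes. A simplified standalone model (the enum ordering below is illustrative; the driver checks specific hnae3_reset_type bits):

```c
#include <stdio.h>

enum reset { RST_NONE, RST_FUNC, RST_GLOBAL, RST_IMP, RST_MAX };

static enum reset get_reset_level(unsigned long *addr)
{
	enum reset level;

	for (level = RST_MAX - 1; level > RST_NONE; level--) {
		if (*addr & (1UL << level)) {
			*addr &= ~(1UL << level);	/* consume it */
			return level;
		}
	}
	return RST_NONE;
}

int main(void)
{
	unsigned long req = (1UL << RST_FUNC) | (1UL << RST_GLOBAL);

	printf("first: %d\n", get_reset_level(&req));	/* RST_GLOBAL (2) */
	printf("then:  %d\n", get_reset_level(&req));	/* RST_FUNC (1) */
	return 0;
}
```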
@ -3233,6 +3243,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
hclge_clear_reset_cause(hdev);
set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
mod_timer(&hdev->reset_timer,
jiffies + HCLGE_RESET_INTERVAL);
@ -3398,7 +3409,7 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
return;
else if (hdev->default_reset_request)
hdev->reset_level =
hclge_get_reset_level(hdev,
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
hdev->reset_level = HNAE3_FUNC_RESET;
@ -3427,13 +3438,14 @@ static void hclge_reset_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
dev_info(&hdev->pdev->dev,
"triggering global reset in reset timer\n");
set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
/* check if there is any ongoing reset in the hardware. This status can
* be checked from reset_pending. If there is one, we need to wait for
* hardware to complete the reset.
@ -3444,12 +3456,12 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* now.
*/
hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
/* check if we got any *new* reset requests to be honored */
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_do_reset(hdev);
@ -3613,8 +3625,8 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclge_rss_config_cmd *req;
unsigned int key_offset = 0;
struct hclge_desc desc;
int key_offset = 0;
int key_counts;
int key_size;
int ret;
@ -3999,7 +4011,8 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
int i, ret;
unsigned int i;
int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
if (ret)
@ -4612,7 +4625,7 @@ static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
{
u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
u8 cur_pos = 0, tuple_size, shift_bits;
int i;
unsigned int i;
for (i = 0; i < MAX_META_DATA; i++) {
tuple_size = meta_data_key_info[i].key_length;
@ -4654,7 +4667,8 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
int i, ret, tuple_size;
unsigned int i;
int ret, tuple_size;
u8 meta_data_region;
memset(key_x, 0, sizeof(key_x));
@ -5978,7 +5992,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
return -EBUSY;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
@ -5989,7 +6003,8 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
req->enable |= enable << HCLGE_TQP_ENABLE_B;
if (enable)
req->enable |= 1U << HCLGE_TQP_ENABLE_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@ -7019,7 +7034,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, u8 qos,
__be16 proto)
{
@ -8123,7 +8138,8 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
int mdix_ctrl, mdix, retval, is_resolved;
int mdix_ctrl, mdix, is_resolved;
unsigned int retval;
if (!phydev) {
*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
@ -8445,6 +8461,23 @@ static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
u16 i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
int ret;
/* Send cmd to clear VF's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
"clear vf(%d) rst failed %d!\n",
vport->vport_id, ret);
}
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@ -8605,6 +8638,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
/* Log and clear the hw errors that have already occurred */
hclge_handle_all_hns_hw_errors(ae_dev);
/* request a delayed reset for error recovery, because an immediate
* global reset on this PF would affect the pending initialization of
* the other PFs
*/
if (ae_dev->hw_err_reset_req) {
enum hnae3_reset_type reset_level;
reset_level = hclge_get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
hclge_set_def_reset_request(ae_dev, reset_level);
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
}
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@ -8817,7 +8866,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
int ret, i;
unsigned int i;
int ret;
kinfo->req_rss_size = new_tqps_num;
@ -9231,6 +9281,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
.get_reset_level = hclge_get_reset_level,
.set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,


@ -701,6 +701,17 @@ struct hclge_mac_tnl_stats {
#define HCLGE_RESET_INTERVAL (10 * HZ)
#pragma pack(1)
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
u8 is_kill;
u16 vlan;
u16 proto;
};
#pragma pack()
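
The struct is packed so it can be overlaid byte-for-byte on the raw mailbox message, replacing the old memcpy()s from fixed offsets in hclge_set_vf_vlan_cfg() (see the hclge_mbx.c hunk below). A standalone sketch of the overlay, assuming a little-endian host:

```c
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct vf_vlan_cfg {
	uint8_t  mbx_cmd;
	uint8_t  subcode;
	uint8_t  is_kill;
	uint16_t vlan;
	uint16_t proto;
};
#pragma pack()

int main(void)
{
	/* raw mailbox bytes as a VF would send them (vlan 100, proto 0x0081
	 * stored little-endian on this host) */
	uint8_t msg[7] = { 0x01, 0x02, 0x01, 0x64, 0x00, 0x81, 0x00 };
	struct vf_vlan_cfg *cfg = (struct vf_vlan_cfg *)msg;

	/* packed => sizeof is exactly 7 and fields line up with msg[] */
	printf("sizeof=%zu subcode=%u is_kill=%u vlan=%u\n",
	       sizeof(*cfg), cfg->subcode, cfg->is_kill, cfg->vlan);
	return 0;
}
```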
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@ -924,7 +935,7 @@ struct hclge_vport {
u16 used_umv_num;
int vport_id;
u16 vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
@ -986,7 +997,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);

View File

@ -306,21 +306,23 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
is_kill = !!mbx_req->msg[2];
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
is_kill = !!msg_cmd->is_kill;
vlan = msg_cmd->vlan;
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false;
bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
@ -363,7 +365,8 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u8 vf_tc_map = 0;
int i, ret;
unsigned int i;
int ret;
for (i = 0; i < kinfo->num_tc; i++)
vf_tc_map |= BIT(i);
@ -551,7 +554,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
int ret, flag;
unsigned int flag;
int ret;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {


@ -55,9 +55,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
HCLGE_MDIO_PHYID_S, phyid);
HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
HCLGE_MDIO_PHYREG_S, regnum);
HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
@ -93,9 +93,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
HCLGE_MDIO_PHYID_S, phyid);
HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
HCLGE_MDIO_PHYREG_S, regnum);
HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,


@ -976,7 +976,7 @@ static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
int i;
unsigned int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;


@ -382,7 +382,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo;
u16 new_tqps = hdev->num_tqps;
int i;
unsigned int i;
kinfo = &nic->kinfo;
kinfo->num_tc = 0;
@ -540,8 +540,8 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclgevf_rss_config_cmd *req;
unsigned int key_offset = 0;
struct hclgevf_desc desc;
int key_offset = 0;
int key_counts;
int key_size;
int ret;
@ -626,7 +626,7 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
struct hclgevf_desc desc;
u16 roundup_size;
int status;
int i;
unsigned int i;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
@ -1129,7 +1129,7 @@ static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
@ -1142,7 +1142,8 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
if (enable)
req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)