Merge branch 'hns3-cleanups'
Huazhong Tan says:

====================
net: hns3: some cleanups for -next

To improve code readability and maintainability, this series refactors
out some bloated functions in the HNS3 ethernet driver.

change log:
V2: remove an unused variable in #5

previous version:
V1: https://patchwork.kernel.org/project/netdevbpf/cover/1612943005-59416-1-git-send-email-tanhuazhong@huawei.com/
====================

Acked-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in: commit c3ff3b02e9
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c

@@ -423,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
 	return (*ppos = len);
 }
 
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+	int ret = 0;
+
+	if (strncmp(cmd_buf, "help", 4) == 0)
+		hns3_dbg_help(handle);
+	else if (strncmp(cmd_buf, "queue info", 10) == 0)
+		ret = hns3_dbg_queue_info(handle, cmd_buf);
+	else if (strncmp(cmd_buf, "queue map", 9) == 0)
+		ret = hns3_dbg_queue_map(handle);
+	else if (strncmp(cmd_buf, "bd info", 7) == 0)
+		ret = hns3_dbg_bd_info(handle, cmd_buf);
+	else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+		hns3_dbg_dev_caps(handle);
+	else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+		hns3_dbg_dev_specs(handle);
+	else if (handle->ae_algo->ops->dbg_run_cmd)
+		ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
 static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 				  size_t count, loff_t *ppos)
 {
@@ -430,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 	struct hns3_nic_priv *priv = handle->priv;
 	char *cmd_buf, *cmd_buf_tmp;
 	int uncopied_bytes;
-	int ret = 0;
+	int ret;
 
 	if (*ppos != 0)
 		return 0;
@@ -461,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 		count = cmd_buf_tmp - cmd_buf + 1;
 	}
 
-	if (strncmp(cmd_buf, "help", 4) == 0)
-		hns3_dbg_help(handle);
-	else if (strncmp(cmd_buf, "queue info", 10) == 0)
-		ret = hns3_dbg_queue_info(handle, cmd_buf);
-	else if (strncmp(cmd_buf, "queue map", 9) == 0)
-		ret = hns3_dbg_queue_map(handle);
-	else if (strncmp(cmd_buf, "bd info", 7) == 0)
-		ret = hns3_dbg_bd_info(handle, cmd_buf);
-	else if (strncmp(cmd_buf, "dev capability", 14) == 0)
-		hns3_dbg_dev_caps(handle);
-	else if (strncmp(cmd_buf, "dev spec", 8) == 0)
-		hns3_dbg_dev_specs(handle);
-	else if (handle->ae_algo->ops->dbg_run_cmd)
-		ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
-	else
-		ret = -EOPNOTSUPP;
-
+	ret = hns3_dbg_check_cmd(handle, cmd_buf);
+	if (ret)
+		hns3_dbg_help(handle);
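This patch extracts the long if/else command dispatch out of hns3_dbg_cmd_write() into hns3_dbg_check_cmd(), so the write handler is left with only the user-buffer handling. A minimal userspace sketch of the same extraction pattern, using hypothetical demo_* names rather than driver code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical handlers standing in for hns3_dbg_help() and friends. */
static int demo_help(void)       { puts("help");       return 0; }
static int demo_queue_info(void) { puts("queue info"); return 0; }

/* The extracted "check" step: map a command string to its handler,
 * keeping the caller's I/O logic short, as hns3_dbg_check_cmd() does.
 */
static int demo_check_cmd(const char *cmd_buf)
{
    if (strncmp(cmd_buf, "help", 4) == 0)
        return demo_help();
    if (strncmp(cmd_buf, "queue info", 10) == 0)
        return demo_queue_info();
    return -EOPNOTSUPP; /* unknown command, as in the driver */
}

int main(void)
{
    /* A failed dispatch falls back to printing help, mirroring the
     * new hns3_dbg_cmd_write() flow.
     */
    if (demo_check_cmd("queue map") != 0)
        demo_help();
    return 0;
}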
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c

@@ -189,36 +189,51 @@ static bool hclge_is_special_opcode(u16 opcode)
 	return false;
 }
 
+struct errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+				int num)
+{
+	struct hclge_desc *desc_to_use;
+	int handle = 0;
+
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
+	}
+}
+
 static int hclge_cmd_convert_err_code(u16 desc_ret)
 {
-	switch (desc_ret) {
-	case HCLGE_CMD_EXEC_SUCCESS:
-		return 0;
-	case HCLGE_CMD_NO_AUTH:
-		return -EPERM;
-	case HCLGE_CMD_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case HCLGE_CMD_QUEUE_FULL:
-		return -EXFULL;
-	case HCLGE_CMD_NEXT_ERR:
-		return -ENOSR;
-	case HCLGE_CMD_UNEXE_ERR:
-		return -ENOTBLK;
-	case HCLGE_CMD_PARA_ERR:
-		return -EINVAL;
-	case HCLGE_CMD_RESULT_ERR:
-		return -ERANGE;
-	case HCLGE_CMD_TIMEOUT:
-		return -ETIME;
-	case HCLGE_CMD_HILINK_ERR:
-		return -ENOLINK;
-	case HCLGE_CMD_QUEUE_ILLEGAL:
-		return -ENXIO;
-	case HCLGE_CMD_INVALID:
-		return -EBADR;
-	default:
-		return -EIO;
-	}
+	struct errcode hclge_cmd_errcode[] = {
+		{HCLGE_CMD_EXEC_SUCCESS, 0},
+		{HCLGE_CMD_NO_AUTH, -EPERM},
+		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
+		{HCLGE_CMD_NEXT_ERR, -ENOSR},
+		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+		{HCLGE_CMD_PARA_ERR, -EINVAL},
+		{HCLGE_CMD_RESULT_ERR, -ERANGE},
+		{HCLGE_CMD_TIMEOUT, -ETIME},
+		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
+		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+		{HCLGE_CMD_INVALID, -EBADR},
+	};
+	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclge_cmd_errcode[i].common_errno;
+
+	return -EIO;
 }
 
 static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 	return hclge_cmd_convert_err_code(desc_ret);
 }
 
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+				  int num, int ntc)
+{
+	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	bool is_completed = false;
+	u32 timeout = 0;
+	int handle, ret;
+
+	/**
+	 * If the command is sync, wait for the firmware to write back,
+	 * if multi descriptors to be sent, use the first one to check
+	 */
+	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclge_cmd_csq_done(hw)) {
+				is_completed = true;
+				break;
+			}
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclge_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	return ret;
+}
+
 /**
  * hclge_cmd_send - send command to command queue
  * @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
 	struct hclge_cmq_ring *csq = &hw->cmq.csq;
-	struct hclge_desc *desc_to_use;
-	bool complete = false;
-	u32 timeout = 0;
-	int handle = 0;
-	int retval;
+	int ret;
 	int ntc;
 
 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	while (handle < num) {
-		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
-		*desc_to_use = desc[handle];
-		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
-			hw->cmq.csq.next_to_use = 0;
-		handle++;
-	}
+	hclge_cmd_copy_desc(hw, desc, num);
 
 	/* Write to hardware */
 	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
 
-	/**
-	 * If the command is sync, wait for the firmware to write back,
-	 * if multi descriptors to be sent, use the first one to check
-	 */
-	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
-		do {
-			if (hclge_cmd_csq_done(hw)) {
-				complete = true;
-				break;
-			}
-			udelay(1);
-			timeout++;
-		} while (timeout < hw->cmq.tx_timeout);
-	}
-
-	if (!complete)
-		retval = -EBADE;
-	else
-		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
-	/* Clean the command send queue */
-	handle = hclge_cmd_csq_clean(hw);
-	if (handle < 0)
-		retval = handle;
-	else if (handle != num)
-		dev_warn(&hdev->pdev->dev,
-			 "cleaned %d, need to clean %d\n", handle, num);
+	ret = hclge_cmd_check_result(hw, desc, num, ntc);
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
-	return retval;
+	return ret;
 }
 
 static void hclge_set_default_capability(struct hclge_dev *hdev)
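The hclge_cmd.c patch replaces the twelve-arm switch in hclge_cmd_convert_err_code() with a walk over a struct errcode table, so adding a firmware code later is a one-line change. A self-contained sketch of the same table-driven mapping; the DEMO_* codes below are made up for illustration (the real HCLGE_CMD_* values are firmware-defined):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same shape as the driver's struct errcode: firmware code -> errno. */
struct errcode {
    unsigned int imp_errcode;
    int common_errno;
};

/* Hypothetical firmware return codes for the demo. */
enum { DEMO_EXEC_SUCCESS = 0, DEMO_NO_AUTH = 1, DEMO_NOT_SUPPORTED = 2 };

static int demo_convert_err_code(unsigned short desc_ret)
{
    static const struct errcode table[] = {
        {DEMO_EXEC_SUCCESS, 0},
        {DEMO_NO_AUTH, -EPERM},
        {DEMO_NOT_SUPPORTED, -EOPNOTSUPP},
    };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(table); i++)
        if (table[i].imp_errcode == desc_ret)
            return table[i].common_errno;

    return -EIO; /* unmapped codes collapse to one default */
}

int main(void)
{
    printf("%d\n", demo_convert_err_code(DEMO_NO_AUTH)); /* -EPERM */
    return 0;
}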
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c

@@ -984,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
 }
 
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
-	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
-	struct hclge_rx_priv_wl_buf *rx_priv_wl;
-	struct hclge_rx_com_wl *rx_packet_cnt;
-	struct hclge_rx_com_thrd *rx_com_thrd;
-	struct hclge_rx_com_wl *rx_com_wl;
-	enum hclge_opcode_type cmd;
-	struct hclge_desc desc[2];
+	struct hclge_desc desc;
 	int i, ret;
 
-	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
 
-	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
 			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
 
-	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+	struct hclge_desc desc;
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "\n");
-	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
 			 le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -1024,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
 		 le16_to_cpu(rx_buf_cmd->shared_buf));
 
-	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
-	if (ret)
-		goto err_qos_cmd_send;
+	return 0;
+}
 
-	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_wl *rx_com_wl;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
 	dev_info(&hdev->pdev->dev, "\n");
 	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
 		 le16_to_cpu(rx_com_wl->com_wl.high),
 		 le16_to_cpu(rx_com_wl->com_wl.low));
 
-	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
-	if (ret)
-		goto err_qos_cmd_send;
+	return 0;
+}
 
-	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_wl *rx_packet_cnt;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
 	dev_info(&hdev->pdev->dev,
 		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
 		 le16_to_cpu(rx_packet_cnt->com_wl.high),
 		 le16_to_cpu(rx_packet_cnt->com_wl.low));
-	dev_info(&hdev->pdev->dev, "\n");
 
-	if (!hnae3_dev_dcb_supported(hdev)) {
-		dev_info(&hdev->pdev->dev,
-			 "Only DCB-supported dev supports rx priv wl\n");
-		return;
-	}
+	return 0;
+}
 
-	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
-	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_priv_wl_buf *rx_priv_wl;
+	struct hclge_desc desc[2];
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -1077,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
 
-	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
-	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_thrd *rx_com_thrd;
+	struct hclge_desc desc[2];
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "\n");
 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -1100,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 			 i + HCLGE_TC_NUM_ONE_DESC,
 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+	return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+	enum hclge_opcode_type cmd;
+	int ret;
+
+	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+	ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "\n");
+	if (!hnae3_dev_dcb_supported(hdev)) {
+		dev_info(&hdev->pdev->dev,
+			 "Only DCB-supported dev supports rx priv wl\n");
+		return;
+	}
+
+	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	return;
+
 err_qos_cmd_send:
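Here the single void hclge_dbg_dump_qos_buf_cfg() becomes six int-returning helpers plus a thin sequencer that only chains their return codes toward one error label. A hypothetical sketch of that orchestration shape (demo_* names are illustrative, not driver code):

#include <stdio.h>

/* Hypothetical per-topic dump helpers; each returns 0 or a negative
 * errno, mirroring the new hclge_dbg_dump_*_cfg() helpers.
 */
static int demo_dump_tx_buf(void) { puts("tx buf"); return 0; }
static int demo_dump_rx_buf(void) { puts("rx buf"); return 0; }

/* The sequencer keeps only control flow; any failure stops the dump. */
static void demo_dump_all(void)
{
    if (demo_dump_tx_buf())
        goto err;
    if (demo_dump_rx_buf())
        goto err;
    return;
err:
    fprintf(stderr, "dump failed\n");
}

int main(void)
{
    demo_dump_all();
    return 0;
}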
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
 #include <linux/crash_dump.h>
+#include <net/ipv6.h>
 #include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
@@ -4500,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
 	return hash_sets;
 }
 
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
-			       struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+				    struct ethtool_rxnfc *nfc,
+				    struct hclge_rss_input_tuple_cmd *req)
 {
-	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_rss_input_tuple_cmd *req;
-	struct hclge_desc desc;
 	u8 tuple_sets;
-	int ret;
-
-	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
-			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
-		return -EINVAL;
-
-	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
 
 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4560,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+			       struct ethtool_rxnfc *nfc)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_rss_input_tuple_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to init rss tuple cmd, ret = %d\n", ret);
+		return ret;
+	}
+
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -4579,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
 	return 0;
 }
 
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
-			       struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+				     u8 *tuple_sets)
 {
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	u8 tuple_sets;
-
-	nfc->data = 0;
-
-	switch (nfc->flow_type) {
+	switch (flow_type) {
 	case TCP_V4_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
 		break;
 	case UDP_V4_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
 		break;
 	case TCP_V6_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
 		break;
 	case UDP_V6_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
 		break;
 	case SCTP_V4_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
 		break;
 	case SCTP_V6_FLOW:
-		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
 		break;
 	case IPV4_FLOW:
 	case IPV6_FLOW:
-		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (!tuple_sets)
-		return 0;
+	return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+	u64 tuple_data = 0;
 
 	if (tuple_sets & HCLGE_D_PORT_BIT)
-		nfc->data |= RXH_L4_B_2_3;
+		tuple_data |= RXH_L4_B_2_3;
 	if (tuple_sets & HCLGE_S_PORT_BIT)
-		nfc->data |= RXH_L4_B_0_1;
+		tuple_data |= RXH_L4_B_0_1;
 	if (tuple_sets & HCLGE_D_IP_BIT)
-		nfc->data |= RXH_IP_DST;
+		tuple_data |= RXH_IP_DST;
 	if (tuple_sets & HCLGE_S_IP_BIT)
-		nfc->data |= RXH_IP_SRC;
+		tuple_data |= RXH_IP_SRC;
+
+	return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+			       struct ethtool_rxnfc *nfc)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	u8 tuple_sets;
+	int ret;
+
+	nfc->data = 0;
+
+	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+	if (ret || !tuple_sets)
+		return ret;
+
+	nfc->data = hclge_convert_rss_tuple(tuple_sets);
 
 	return 0;
 }
@@ -5508,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
 		BIT(INNER_IP_TOS);
 
 	/* check whether src/dst ip address used */
-	if (!spec->ip6src[0] && !spec->ip6src[1] &&
-	    !spec->ip6src[2] && !spec->ip6src[3])
+	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
 		*unused_tuple |= BIT(INNER_SRC_IP);
 
-	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
-	    !spec->ip6dst[2] && !spec->ip6dst[3])
+	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
 		*unused_tuple |= BIT(INNER_DST_IP);
 
 	if (!spec->psrc)
@@ -5538,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
 
 	/* check whether src/dst ip address used */
-	if (!spec->ip6src[0] && !spec->ip6src[1] &&
-	    !spec->ip6src[2] && !spec->ip6src[3])
+	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
 		*unused_tuple |= BIT(INNER_SRC_IP);
 
-	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
-	    !spec->ip6dst[2] && !spec->ip6dst[3])
+	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
 		*unused_tuple |= BIT(INNER_DST_IP);
 
 	if (!spec->l4_proto)
@@ -8323,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
 	}
 }
 
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
-				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+				 bool is_del_list,
+				 struct list_head *tmp_del_list)
 {
-	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
 	struct hclge_mac_node *mac_cfg, *tmp;
-	struct hclge_dev *hdev = vport->back;
-	struct list_head tmp_del_list, *list;
-	int ret;
-
-	if (mac_type == HCLGE_MAC_ADDR_UC) {
-		list = &vport->uc_mac_list;
-		unsync = hclge_rm_uc_addr_common;
-	} else {
-		list = &vport->mc_mac_list;
-		unsync = hclge_rm_mc_addr_common;
-	}
-
-	INIT_LIST_HEAD(&tmp_del_list);
-
-	if (!is_del_list)
-		set_bit(vport->vport_id, hdev->vport_config_block);
-
-	spin_lock_bh(&vport->mac_list_lock);
 
 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
 		switch (mac_cfg->state) {
 		case HCLGE_MAC_TO_DEL:
 		case HCLGE_MAC_ACTIVE:
 			list_del(&mac_cfg->node);
-			list_add_tail(&mac_cfg->node, &tmp_del_list);
+			list_add_tail(&mac_cfg->node, tmp_del_list);
 			break;
 		case HCLGE_MAC_TO_ADD:
 			if (is_del_list) {
@@ -8362,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 			break;
 		}
 	}
+}
 
-	spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+				  int (*unsync)(struct hclge_vport *vport,
+						const unsigned char *addr),
+				  bool is_del_list,
+				  struct list_head *tmp_del_list)
+{
+	struct hclge_mac_node *mac_cfg, *tmp;
+	int ret;
 
-	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
 		ret = unsync(vport, mac_cfg->mac_addr);
 		if (!ret || ret == -ENOENT) {
 			/* clear all mac addr from hardware, but remain these
@@ -8383,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 			mac_cfg->state = HCLGE_MAC_TO_DEL;
 		}
 	}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		list = &vport->uc_mac_list;
+		unsync = hclge_rm_uc_addr_common;
+	} else {
+		list = &vport->mc_mac_list;
+		unsync = hclge_rm_mc_addr_common;
+	}
+
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	if (!is_del_list)
+		set_bit(vport->vport_id, hdev->vport_config_block);
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
+
 	spin_lock_bh(&vport->mac_list_lock);
 
@@ -8789,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
 	handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
 }
 
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
-				    bool is_kill, u16 vlan,
-				    __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+					bool is_kill, u16 vlan,
+					struct hclge_desc *desc)
 {
-	struct hclge_vport *vport = &hdev->vport[vfid];
 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
-	struct hclge_desc desc[2];
 	u8 vf_byte_val;
 	u8 vf_byte_off;
 	int ret;
 
-	/* if vf vlan table is full, firmware will close vf vlan filter, it
-	 * is unable and unnecessary to add new vlan id to vf vlan filter.
-	 * If spoof check is enable, and vf vlan is full, it shouldn't add
-	 * new vlan, because tx packets with these vlan id will be dropped.
-	 */
-	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
-		if (vport->vf_info.spoofchk && vlan) {
-			dev_err(&hdev->pdev->dev,
-				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
-			return -EPERM;
-		}
-		return 0;
-	}
-
 	hclge_cmd_setup_basic_desc(&desc[0],
 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
 	hclge_cmd_setup_basic_desc(&desc[1],
@@ -8844,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
 		return ret;
 	}
 
+	return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+					  bool is_kill, struct hclge_desc *desc)
+{
+	struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
 	if (!is_kill) {
 #define HCLGE_VF_VLAN_NO_ENTRY	2
-		if (!req0->resp_code || req0->resp_code == 1)
+		if (!req->resp_code || req->resp_code == 1)
 			return 0;
 
-		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
 			set_bit(vfid, hdev->vf_vlan_full);
 			dev_warn(&hdev->pdev->dev,
 				 "vf vlan table is full, vf vlan filter is disabled\n");
@@ -8858,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
 
 		dev_err(&hdev->pdev->dev,
 			"Add vf vlan filter fail, ret =%u.\n",
-			req0->resp_code);
+			req->resp_code);
 	} else {
 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
-		if (!req0->resp_code)
+		if (!req->resp_code)
 			return 0;
 
 		/* vf vlan filter is disabled when vf vlan table is full,
@@ -8869,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
 		 * Just return 0 without warning, avoid massive verbose
 		 * print logs when unload.
 		 */
-		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
 			return 0;
 
 		dev_err(&hdev->pdev->dev,
 			"Kill vf vlan filter fail, ret =%u.\n",
-			req0->resp_code);
+			req->resp_code);
 	}
 
 	return -EIO;
 }
 
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+				    bool is_kill, u16 vlan,
+				    __be16 proto)
+{
+	struct hclge_vport *vport = &hdev->vport[vfid];
+	struct hclge_desc desc[2];
+	int ret;
+
+	/* if vf vlan table is full, firmware will close vf vlan filter, it
+	 * is unable and unnecessary to add new vlan id to vf vlan filter.
+	 * If spoof check is enable, and vf vlan is full, it shouldn't add
+	 * new vlan, because tx packets with these vlan id will be dropped.
+	 */
+	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+		if (vport->vf_info.spoofchk && vlan) {
+			dev_err(&hdev->pdev->dev,
+				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
+			return -EPERM;
+		}
+		return 0;
+	}
+
+	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+	if (ret)
+		return ret;
+
+	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
 				      u16 vlan_id, bool is_kill)
 {
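Two filter-tuple checks above drop the open-coded four-word comparison in favor of ipv6_addr_any() from <net/ipv6.h> (hence the new include), which returns true when all 128 bits of the address are zero. A userspace analogue of the same check, using a demo type rather than the kernel's struct in6_addr:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's struct in6_addr / __be32 ip6src[4] layout. */
struct demo_in6_addr {
    uint32_t s6_addr32[4];
};

/* Equivalent of ipv6_addr_any(): true when all 128 bits are zero. */
static bool demo_ipv6_addr_any(const struct demo_in6_addr *a)
{
    return (a->s6_addr32[0] | a->s6_addr32[1] |
            a->s6_addr32[2] | a->s6_addr32[3]) == 0;
}

int main(void)
{
    struct demo_in6_addr any = { {0, 0, 0, 0} };
    printf("%d\n", demo_ipv6_addr_any(&any)); /* prints 1 */
    return 0;
}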
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c

@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
 	desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
 }
 
+struct vf_errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+				  struct hclgevf_desc *desc, int num)
+{
+	struct hclgevf_desc *desc_to_use;
+	int handle = 0;
+
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
+	}
+}
+
 static int hclgevf_cmd_convert_err_code(u16 desc_ret)
 {
-	switch (desc_ret) {
-	case HCLGEVF_CMD_EXEC_SUCCESS:
-		return 0;
-	case HCLGEVF_CMD_NO_AUTH:
-		return -EPERM;
-	case HCLGEVF_CMD_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case HCLGEVF_CMD_QUEUE_FULL:
-		return -EXFULL;
-	case HCLGEVF_CMD_NEXT_ERR:
-		return -ENOSR;
-	case HCLGEVF_CMD_UNEXE_ERR:
-		return -ENOTBLK;
-	case HCLGEVF_CMD_PARA_ERR:
-		return -EINVAL;
-	case HCLGEVF_CMD_RESULT_ERR:
-		return -ERANGE;
-	case HCLGEVF_CMD_TIMEOUT:
-		return -ETIME;
-	case HCLGEVF_CMD_HILINK_ERR:
-		return -ENOLINK;
-	case HCLGEVF_CMD_QUEUE_ILLEGAL:
-		return -ENXIO;
-	case HCLGEVF_CMD_INVALID:
-		return -EBADR;
-	default:
-		return -EIO;
-	}
+	struct vf_errcode hclgevf_cmd_errcode[] = {
+		{HCLGEVF_CMD_EXEC_SUCCESS, 0},
+		{HCLGEVF_CMD_NO_AUTH, -EPERM},
+		{HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+		{HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+		{HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+		{HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+		{HCLGEVF_CMD_PARA_ERR, -EINVAL},
+		{HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+		{HCLGEVF_CMD_TIMEOUT, -ETIME},
+		{HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+		{HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+		{HCLGEVF_CMD_INVALID, -EBADR},
+	};
+	u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclgevf_cmd_errcode[i].common_errno;
+
+	return -EIO;
 }
 
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+				    struct hclgevf_desc *desc, int num, int ntc)
+{
+	u16 opcode, desc_ret;
+	int handle;
+
+	opcode = le16_to_cpu(desc[0].opcode);
+	for (handle = 0; handle < num; handle++) {
+		/* Get the result of hardware write back */
+		desc[handle] = hw->cmq.csq.desc[ntc];
+		ntc++;
+		if (ntc == hw->cmq.csq.desc_num)
+			ntc = 0;
+	}
+	if (likely(!hclgevf_is_special_opcode(opcode)))
+		desc_ret = le16_to_cpu(desc[num - 1].retval);
+	else
+		desc_ret = le16_to_cpu(desc[0].retval);
+	hw->cmq.last_status = desc_ret;
+
+	return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+				    struct hclgevf_desc *desc, int num, int ntc)
+{
+	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	bool is_completed = false;
+	u32 timeout = 0;
+	int handle, ret;
+
+	/* If the command is sync, wait for the firmware to write back,
+	 * if multi descriptors to be sent, use the first one to check
+	 */
+	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclgevf_cmd_csq_done(hw)) {
+				is_completed = true;
+				break;
+			}
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclgevf_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	return ret;
+}
+
 /* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 {
 	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
 	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
-	struct hclgevf_desc *desc_to_use;
-	bool complete = false;
-	u32 timeout = 0;
-	int handle = 0;
-	int status = 0;
-	u16 retval;
-	u16 opcode;
+	int ret;
 	int ntc;
 
 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	opcode = le16_to_cpu(desc[0].opcode);
-	while (handle < num) {
-		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
-		*desc_to_use = desc[handle];
-		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
-			hw->cmq.csq.next_to_use = 0;
-		handle++;
-	}
+	hclgevf_cmd_copy_desc(hw, desc, num);
 
 	/* Write to hardware */
 	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
 			  hw->cmq.csq.next_to_use);
 
-	/* If the command is sync, wait for the firmware to write back,
-	 * if multi descriptors to be sent, use the first one to check
-	 */
-	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
-		do {
-			if (hclgevf_cmd_csq_done(hw))
-				break;
-			udelay(1);
-			timeout++;
-		} while (timeout < hw->cmq.tx_timeout);
-	}
-
-	if (hclgevf_cmd_csq_done(hw)) {
-		complete = true;
-		handle = 0;
-
-		while (handle < num) {
-			/* Get the result of hardware write back */
-			desc_to_use = &hw->cmq.csq.desc[ntc];
-			desc[handle] = *desc_to_use;
-
-			if (likely(!hclgevf_is_special_opcode(opcode)))
-				retval = le16_to_cpu(desc[handle].retval);
-			else
-				retval = le16_to_cpu(desc[0].retval);
-
-			status = hclgevf_cmd_convert_err_code(retval);
-			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
-			ntc++;
-			handle++;
-			if (ntc == hw->cmq.csq.desc_num)
-				ntc = 0;
-		}
-	}
-
-	if (!complete)
-		status = -EBADE;
-
-	/* Clean the command send queue */
-	handle = hclgevf_cmd_csq_clean(hw);
-	if (handle != num)
-		dev_warn(&hdev->pdev->dev,
-			 "cleaned %d, need to clean %d\n", handle, num);
+	ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
-	return status;
+	return ret;
 }
 
 static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
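On the VF side, hclgevf_cmd_send() is decomposed the same way as the PF path: hclgevf_cmd_copy_desc() fills the ring, and hclgevf_cmd_check_result() owns the bounded busy-wait and CSQ cleanup. The polling shape, sketched standalone with hypothetical demo_* names (the driver uses udelay(1) against hw->cmq.tx_timeout):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical completion check standing in for hclgevf_cmd_csq_done();
 * this demo reports completion on the third poll.
 */
static int g_done_after = 3;
static bool demo_csq_done(void)
{
    return --g_done_after <= 0;
}

/* Bounded busy-wait: poll until done or the iteration budget runs out,
 * as the driver does with udelay(1) and hw->cmq.tx_timeout.
 */
static bool demo_wait_done(unsigned int tx_timeout)
{
    unsigned int timeout = 0;

    do {
        if (demo_csq_done())
            return true;
        /* udelay(1) would go here in the driver */
        timeout++;
    } while (timeout < tx_timeout);

    return false;
}

int main(void)
{
    printf("%s\n", demo_wait_done(10) ? "completed" : "timed out");
    return 0;
}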
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

@@ -873,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
 	return hash_sets;
 }
 
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
-				 struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+				      struct ethtool_rxnfc *nfc,
+				      struct hclgevf_rss_input_tuple_cmd *req)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	struct hclgevf_rss_input_tuple_cmd *req;
-	struct hclgevf_desc desc;
 	u8 tuple_sets;
-	int ret;
-
-	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
-		return -EOPNOTSUPP;
-
-	if (nfc->data &
-	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
-		return -EINVAL;
-
-	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
 
 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -936,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+				 struct ethtool_rxnfc *nfc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclgevf_rss_input_tuple_cmd *req;
+	struct hclgevf_desc desc;
+	int ret;
+
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+		return -EOPNOTSUPP;
+
+	if (nfc->data &
+	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to init rss tuple cmd, ret = %d\n", ret);
+		return ret;
+	}
+
 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -954,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
 	return 0;
 }
 
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+					      int flow_type, u8 *tuple_sets)
+{
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
+		break;
+	case UDP_V4_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
+		break;
+	case TCP_V6_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
+		break;
+	case UDP_V6_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
+		break;
+	case SCTP_V4_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
+		break;
+	case SCTP_V6_FLOW:
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+	u64 tuple_data = 0;
+
+	if (tuple_sets & HCLGEVF_D_PORT_BIT)
+		tuple_data |= RXH_L4_B_2_3;
+	if (tuple_sets & HCLGEVF_S_PORT_BIT)
+		tuple_data |= RXH_L4_B_0_1;
+	if (tuple_sets & HCLGEVF_D_IP_BIT)
+		tuple_data |= RXH_IP_DST;
+	if (tuple_sets & HCLGEVF_S_IP_BIT)
+		tuple_data |= RXH_IP_SRC;
+
+	return tuple_data;
+}
+
 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
 				 struct ethtool_rxnfc *nfc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
 	u8 tuple_sets;
+	int ret;
 
 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
 		return -EOPNOTSUPP;
 
 	nfc->data = 0;
 
-	switch (nfc->flow_type) {
-	case TCP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
-		break;
-	case UDP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
-		break;
-	case TCP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
-		break;
-	case UDP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
-		break;
-	case SCTP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
-		break;
-	case SCTP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
-		break;
-	case IPV4_FLOW:
-	case IPV6_FLOW:
-		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
-		break;
-	default:
-		return -EINVAL;
-	}
+	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+						 &tuple_sets);
+	if (ret || !tuple_sets)
+		return ret;
 
-	if (!tuple_sets)
-		return 0;
-
-	if (tuple_sets & HCLGEVF_D_PORT_BIT)
-		nfc->data |= RXH_L4_B_2_3;
-	if (tuple_sets & HCLGEVF_S_PORT_BIT)
-		nfc->data |= RXH_L4_B_0_1;
-	if (tuple_sets & HCLGEVF_D_IP_BIT)
-		nfc->data |= RXH_IP_DST;
-	if (tuple_sets & HCLGEVF_S_IP_BIT)
-		nfc->data |= RXH_IP_SRC;
+	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
 
 	return 0;
 }
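Finally, hclgevf_get_rss_tuple() is split into two pure helpers: hclgevf_get_rss_tuple_by_flow_type() picks the stored tuple byte for the ethtool flow type, and hclgevf_convert_rss_tuple() translates its bits into RXH_* flags. A sketch of the bit translation with made-up DEMO_* values (the real HCLGEVF_*_BIT and RXH_* constants come from the driver and ethtool headers):

#include <stdint.h>
#include <stdio.h>

/* Demo bit layout; the real HCLGEVF_*_BIT values live in the driver. */
#define DEMO_S_IP_BIT   0x1
#define DEMO_D_IP_BIT   0x2
#define DEMO_S_PORT_BIT 0x4
#define DEMO_D_PORT_BIT 0x8

/* Demo stand-ins for ethtool's RXH_* flags. */
#define DEMO_RXH_IP_SRC   0x10
#define DEMO_RXH_IP_DST   0x20
#define DEMO_RXH_L4_B_0_1 0x40
#define DEMO_RXH_L4_B_2_3 0x80

/* Pure translation, same shape as hclgevf_convert_rss_tuple(). */
static uint64_t demo_convert_rss_tuple(uint8_t tuple_sets)
{
    uint64_t tuple_data = 0;

    if (tuple_sets & DEMO_D_PORT_BIT)
        tuple_data |= DEMO_RXH_L4_B_2_3;
    if (tuple_sets & DEMO_S_PORT_BIT)
        tuple_data |= DEMO_RXH_L4_B_0_1;
    if (tuple_sets & DEMO_D_IP_BIT)
        tuple_data |= DEMO_RXH_IP_DST;
    if (tuple_sets & DEMO_S_IP_BIT)
        tuple_data |= DEMO_RXH_IP_SRC;

    return tuple_data;
}

int main(void)
{
    /* src/dst IP hashing only -> 0x30 with the demo flag values */
    printf("0x%llx\n", (unsigned long long)
           demo_convert_rss_tuple(DEMO_S_IP_BIT | DEMO_D_IP_BIT));
    return 0;
}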