// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicates to VF whether its request succeeded (0) or failed
 * @resp_data: optional data returned to VF in the response
 * @resp_data_len: length of @resp_data in bytes
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF failed to generate response to VF: len %u exceeds max len %u\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_data_len is too long, truncate it to the max
		 * length and still return the msg to VF
		 */
		resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

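	/* response layout: msg[0] is the PF->VF response opcode, msg[1] and
	 * msg[2] echo the original request code and subcode, msg[3] is a
	 * pass(0)/fail(1) flag, and any response payload starts at msg[4]
	 */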
	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	enum hnae3_reset_type reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

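	/* map the reset level the PF is going through to the reset level
	 * the VF should assert on itself
	 */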
	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

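	/* besides the basic header, each ring node occupies
	 * HCLGE_MBX_RING_NODE_VARIABLE_NUM bytes of the message, so reject
	 * requests with more nodes than one mailbox message can carry
	 */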
	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
			 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
			HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S,
			req->msg[5]);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
#define HCLGE_MBX_BC_INDEX	1
#define HCLGE_MBX_UC_INDEX	2
#define HCLGE_MBX_MC_INDEX	3

	bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
	bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
	bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
	int ret;

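	/* an untrusted VF is not allowed to enable unicast or multicast
	 * promiscuous mode; only broadcast promiscuity is honored for it
	 */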
	if (!vport->vf_info.trusted) {
		en_uc = false;
		en_mc = false;
	}

	ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
	if (req->mbx_need_resp)
		hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);

	vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;

	return ret;
}

void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
{
	u8 dest_vfid = (u8)vport->vport_id;
	u8 msg_data[2];

	memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));

	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

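	/* request layout: msg[1] holds the subcode (add/remove/modify), the
	 * new MAC starts at msg[2] and, for the modify subcode, the old MAC
	 * starts at msg[8]
	 */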
	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
			status = -EPERM;
			goto out;
		}

		if (!is_valid_ether_addr(mac_addr)) {
			status = -EINVAL;
			goto out;
		}

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status) {
			hclge_add_uc_addr_common(vport, old_addr);
		} else {
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
		}
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg[1]);
		return -EIO;
	}

out:
	if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	u8 resp_len = 0;
	u8 resp_data;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_MC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_MC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status,
				     &resp_data, resp_len);

	return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

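	/* message layout: state, vlan_proto, qos and vlan_tag, packed as
	 * four consecutive u16 fields
	 */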
	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_vf_vlan_cfg *msg_cmd;
	int status = 0;

	msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!msg_cmd->is_kill;
		vlan = msg_cmd->vlan;
		proto = msg_cmd->proto;
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
		if (mbx_req->mbx_need_resp)
			return hclge_gen_resp_to_vf(vport, mbx_req, status,
						    NULL, 0);
	} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
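		/* the RX VLAN-strip request reuses the is_kill byte of
		 * struct hclge_vf_vlan_cfg as its enable flag
		 */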
		bool en = msg_cmd->is_kill ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
		struct hclge_vlan_info *vlan_info;
		u16 *state;

		state = (u16 *)&mbx_req->msg[2];
		vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
		status = hclge_update_port_base_vlan_cfg(vport, *state,
							 vlan_info);
	} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
		u8 state;

		state = vport->port_base_vlan_cfg.state;
		status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
					      sizeof(u8));
	}

	return status;
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      bool gen_resp)
{
	bool alive = !!mbx_req->msg[2];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u8 vf_tc_map = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++)
		vf_tc_map |= BIT(i);

	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
				   sizeof(vf_tc_map));

	return ret;
}

static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN	6
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}

static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
				    ETH_ALEN);
}

static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
	u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_DEPTH_INFO_LEN);
}

static int hclge_get_vf_media_type(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_data[2];

	resp_data[0] = hdev->hw.mac.media_type;
	resp_data[1] = hdev->hw.mac.module_type;
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    sizeof(resp_data));
}

static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

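	/* message layout: link_status (2 bytes) | speed (4 bytes) |
	 * duplex (2 bytes)
	 */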
	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10];
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg[2];

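	/* msg_data[0] selects between the supported and advertised bitmaps;
	 * the link-mode word (sizeof(unsigned long) bytes) follows at
	 * offset 2
	 */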
	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset complete */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}

static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!\n",
		 vport->vport_id);

	ret = hclge_func_reset_cmd(hdev, vport->vport_id);
	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	int ret;
	u32 mtu;

	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
	ret = hclge_set_vport_mtu(vport, mtu);

	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id, qid_in_pf;
	u8 resp_data[2];

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    sizeof(resp_data));
}

static int hclge_get_rss_key(struct hclge_vport *vport,
			     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg[2];

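	/* the VF-supplied index selects which HCLGE_RSS_MBX_RESP_LEN-byte
	 * chunk of the PF's RSS hash key is returned; it is not range
	 * checked here
	 */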
	memcpy(&resp_data[0],
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_RSS_MBX_RESP_LEN);
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
#define LINK_STATUS_OFFSET	1
#define LINK_FAIL_CODE_OFFSET	2

	hclge_task_schedule(hdev, 0);

	if (!req->msg[LINK_STATUS_OFFSET])
		hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	unsigned int flag;
	int ret;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			ret = hclge_get_vf_queue_depth(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q depth for VF\n",
					ret);
			break;

		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport, req);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF's MTU\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			ret = hclge_get_queue_id_in_pf(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get qid for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			ret = hclge_get_rss_key(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get rss key for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
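			/* the VF has gone through FLR: purge every MAC and
			 * VLAN entry the VF had configured
			 */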
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, true);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			ret = hclge_get_vf_media_type(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get media type for VF\n",
					ret);
			break;
		case HCLGE_MBX_PUSH_LINK_STATUS:
			hclge_handle_link_change_event(hdev, req);
			break;
		case HCLGE_MBX_GET_MAC_ADDR:
			ret = hclge_get_vf_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get MAC for VF\n",
					ret);
			break;
		case HCLGE_MBX_NCSI_ERROR:
			hclge_handle_ncsi_error(hdev);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"unsupported mailbox message, code = %u\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer; M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}