// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicate to VF whether its request succeeded (0) or failed
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %d exceeds max len %d\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* bail out here, otherwise the memcpy below would overflow
		 * the msg area of desc.data
		 */
		return -EINVAL;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}
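
/* Layout of the synchronous response built above (a descriptive sketch,
 * derived from the assignments in hclge_gen_resp_to_vf()):
 *
 *	msg[0] = HCLGE_MBX_PF_VF_RESP	response marker
 *	msg[1] = original msg[0]	VF request code being answered
 *	msg[2] = original msg[1]	VF request subcode
 *	msg[3] = 0 on success, 1 on failure
 *	msg[4..] = up to HCLGE_MBX_MAX_RESP_DATA_SIZE bytes of payload
 *
 * So a successful HCLGE_MBX_GET_QINFO reply would carry
 * {HCLGE_MBX_PF_VF_RESP, HCLGE_MBX_GET_QINFO, <subcode>, 0, <data...>}.
 */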

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}
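
/* Unlike hclge_gen_resp_to_vf(), which answers a pending VF request,
 * hclge_send_mbx_msg() carries a PF-initiated notification: msg[0] is the
 * mailbox opcode itself (e.g. HCLGE_MBX_ASSERTING_RESET or
 * HCLGE_MBX_LINK_STAT_CHANGE below) and the payload starts at msg[1].
 */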

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	enum hnae3_reset_type reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		return -EINVAL;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
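
/* A sketch of the two-byte payload sent above, assuming the reset enum
 * value fits in 16 bits (exact numeric values live in hnae3.h): for a PF
 * function reset, msg_data holds HNAE3_VF_PF_FUNC_RESET copied as a u16,
 * and a VF-side reader would unpack it the same way (illustrative, not
 * part of this driver):
 *
 *	u16 rst;
 *
 *	memcpy(&rst, &msg_data[0], sizeof(u16));
 *	reset_type = (enum hnae3_reset_type)rst;
 */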

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
		HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S,
			req->msg[5]);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}
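
/* Worked example of the indexing above, using the layout from the comment:
 * the first node occupies msg[3..5], so HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
 * header slots precede it and each extra node takes
 * HCLGE_MBX_RING_NODE_VARIABLE_NUM slots (both 3, as the msg[6]~msg[14]
 * range implies — treat the exact values as an assumption):
 *
 *	i = 1: ring type at msg[3 * 1 + 3] = msg[6], tqp id at msg[7],
 *	       int_gl idx at msg[8]
 *	i = 2: msg[9], msg[10], msg[11]
 *
 * The ring_num bound check therefore caps the chain at
 * (HCLGE_MBX_VF_MSG_DATA_NUM - 3) / 3 extra nodes per mailbox message.
 */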

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	/* free the chain even when binding fails, so the nodes allocated by
	 * hclge_get_ring_chain_from_mbx() are not leaked
	 */
	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_uc = req->msg[1] ? true : false;
	bool en_mc = req->msg[2] ? true : false;
	struct hclge_promisc_param param;

	/* always enable broadcast promisc bit */
	hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status)
			hclge_add_uc_addr_common(vport, old_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}
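
/* Message layout consumed above (a sketch inferred from the pointer
 * arithmetic; MAC addresses are ETH_ALEN == 6 bytes):
 *
 *	msg[1]		subcode: UC_MODIFY / UC_ADD / UC_REMOVE
 *	msg[2..7]	(new) MAC address
 *	msg[8..13]	old MAC address, only meaningful for UC_MODIFY
 *
 * Note the UC_MODIFY path rolls back to old_addr when adding the new
 * address fails, so the VF is never left with neither address installed.
 */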

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	u8 resp_len = 0;
	u8 resp_data;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status,
				     &resp_data, resp_len);

	return 0;
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 bool gen_resp)
{
	int status = 0;

	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!mbx_req->msg[2];
		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = mbx_req->msg[2] ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	}

	if (gen_resp)
		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return status;
}
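
/* VLAN filter message layout unpacked above (descriptive sketch):
 *
 *	msg[1]		subcode (HCLGE_MBX_VLAN_FILTER or
 *			HCLGE_MBX_VLAN_RX_OFF_CFG)
 *	msg[2]		is_kill: non-zero means remove the filter entry
 *	msg[3..4]	VLAN id (u16, copied via memcpy)
 *	msg[5..6]	VLAN protocol (u16), converted with cpu_to_be16()
 *			before being handed to hclge_set_vlan_filter()
 */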

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      bool gen_resp)
{
	bool alive = !!mbx_req->msg[2];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
				   sizeof(u8));

	return ret;
}

static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN		8
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
	memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}
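
/* The 8-byte HCLGE_TQPS_RSS_INFO_LEN response assembled above packs four
 * u16 fields back to back:
 *
 *	bytes 0-1	vport->alloc_tqps	TQPs allocated to the VF
 *	bytes 2-3	nic.kinfo.rss_size	RSS queue size
 *	bytes 4-5	hdev->num_desc		descriptors per ring
 *	bytes 6-7	hdev->rx_buf_len	RX buffer length
 *
 * The memcpy()s copy raw host-order values, so both sides are assumed to
 * share the same endianness.
 */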

static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	link_status = (u16)hdev->hw.mac.link;
	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}
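
/* msg_data built above is 8 bytes: link status (u16) at offset 0, speed
 * (u32) at offset 2, duplex (u16) at offset 6. A VF-side unpacking sketch
 * mirroring the memcpy() offsets (hypothetical helper, not part of this
 * driver):
 *
 *	u16 link_status, duplex;
 *	u32 speed;
 *
 *	memcpy(&link_status, &msg[0], sizeof(u16));
 *	memcpy(&speed, &msg[2], sizeof(u32));
 *	memcpy(&duplex, &msg[6], sizeof(u16));
 */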

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset complete */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}

static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
		 vport->vport_id);

	ret = hclge_func_reset_cmd(hdev, vport->vport_id);
	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	int ret;
	u32 mtu;

	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
	ret = hclge_set_vport_mtu(vport, mtu);

	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id, qid_in_pf;
	u8 resp_data[2];

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}
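
/* Example round trip for HCLGE_MBX_GET_QID_IN_PF (values illustrative):
 * a VF asking about its local queue 3 puts 3 in msg[2..3]; the PF maps it
 * to the global TQP id with hclge_covert_handle_qid_global() and returns
 * that id as a raw u16 in the two response payload bytes.
 */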

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	int ret, flag;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %d\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport, req);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			ret = hclge_get_queue_id_in_pf(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get qid for VF\n",
					ret);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %d\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, M7 need this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}