Merge branch 'hns3-next'

Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
Ethernet controller driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-01-30 14:50:04 -08:00
commit 630afc7734
13 changed files with 146 additions and 105 deletions

View File

@ -238,7 +238,7 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
@ -259,6 +259,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
ret = -EOPNOTSUPP;
goto out_err;
}
@ -285,8 +286,15 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret);
}
out_err:
mutex_unlock(&hnae3_common_lock);
return 0;
out_err:
list_del(&ae_dev->node);
mutex_unlock(&hnae3_common_lock);
return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
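For context: the hunks above convert hnae3_register_ae_dev() from void to int so registration failures can propagate to the PCI probe path, and add an unwind label. A condensed sketch of the resulting control flow (simplified; only the missing-ops error path shown, other failure paths elided):

int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	int ret = 0;

	mutex_lock(&hnae3_common_lock);
	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);

	/* ... match against registered ae_algos, then: */
	if (!ae_dev->ops) {
		ret = -EOPNOTSUPP;
		goto out_err;
	}

	mutex_unlock(&hnae3_common_lock);
	return 0;

out_err:
	/* undo the list insertion so a failed device is not left registered */
	list_del(&ae_dev->node);
	mutex_unlock(&hnae3_common_lock);
	return ret;
}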

View File

@ -464,6 +464,8 @@ struct hnae3_ae_ops {
int (*set_gro_en)(struct hnae3_handle *handle, int enable);
u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@ -587,7 +589,7 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);

View File

@ -655,11 +655,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
u8 *il4_proto)
{
union {
struct iphdr *v4;
struct ipv6hdr *v6;
unsigned char *hdr;
} l3;
union l3_hdr_info l3;
unsigned char *l4_hdr;
unsigned char *exthdr;
u8 l4_proto_tmp;
@ -712,17 +708,8 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
union {
struct iphdr *v4;
struct ipv6hdr *v6;
unsigned char *hdr;
} l3;
union {
struct tcphdr *tcp;
struct udphdr *udp;
struct gre_base_hdr *gre;
unsigned char *hdr;
} l4;
union l3_hdr_info l3;
union l4_hdr_info l4;
unsigned char *l2_hdr;
u8 l4_proto = ol4_proto;
u32 ol2_len;
@ -821,12 +808,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
union {
struct tcphdr *tcp;
struct udphdr *udp;
struct gre_base_hdr *gre;
unsigned char *hdr;
} l4;
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
@ -842,11 +824,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
union {
struct iphdr *v4;
struct ipv6hdr *v6;
unsigned char *hdr;
} l3;
union l3_hdr_info l3;
u32 l4_proto = ol4_proto;
l3.hdr = skb_network_header(skb);
@ -1774,9 +1752,13 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
hnae3_register_ae_dev(ae_dev);
ret = hnae3_register_ae_dev(ae_dev);
if (ret) {
devm_kfree(&pdev->dev, ae_dev);
pci_set_drvdata(pdev, NULL);
}
return 0;
return ret;
}
/* hns3_remove - Device removal routine
@ -3201,12 +3183,12 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
group->count = 0;
}
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int i, ret;
int i;
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
@ -3214,15 +3196,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
continue;
ret = hns3_get_vector_ring_chain(tqp_vector,
&vector_ring_chain);
if (ret)
return ret;
hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
ret = h->ae_algo->ops->unmap_ring_from_vector(h,
h->ae_algo->ops->unmap_ring_from_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
if (ret)
return ret;
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
@ -3238,8 +3215,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
}
return 0;
}
static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
@ -3549,6 +3524,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init)
return ret;
}
static int hns3_init_phy(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = 0;
if (h->ae_algo->ops->mac_connect_phy)
ret = h->ae_algo->ops->mac_connect_phy(h);
return ret;
}
static void hns3_uninit_phy(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->mac_disconnect_phy)
h->ae_algo->ops->mac_disconnect_phy(h);
}
static int hns3_restore_fd_rules(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@ -3658,6 +3652,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_init_ring_data;
}
ret = hns3_init_phy(netdev);
if (ret)
goto out_init_phy;
ret = register_netdev(netdev);
if (ret) {
dev_err(priv->dev, "probe register netdev fail!\n");
@ -3682,8 +3680,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
out_init_ring_data:
(void)hns3_nic_uninit_vector_data(priv);
hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
@ -3716,9 +3717,9 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret)
netdev_err(netdev, "uninit vector error\n");
hns3_uninit_phy(netdev);
hns3_nic_uninit_vector_data(priv);
ret = hns3_nic_dealloc_vector_data(priv);
if (ret)
@ -4111,11 +4112,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
netdev_err(netdev, "uninit vector error\n");
return ret;
}
hns3_nic_uninit_vector_data(priv);
hns3_store_coal(priv);

View File

@ -574,6 +574,7 @@ union l3_hdr_info {
union l4_hdr_info {
struct tcphdr *tcp;
struct udphdr *udp;
struct gre_base_hdr *gre;
unsigned char *hdr;
};
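These shared definitions in hns3_enet.h replace the repeated anonymous unions deleted in the hns3_enet.c hunks above; each call site now aliases a single header pointer as whichever protocol view it needs. A minimal sketch of the pattern (condensed from the hns3_tunnel_csum_bug() hunk):

	union l4_hdr_info l4;

	l4.hdr = skb_transport_header(skb);

	/* the same bytes, viewed as a UDP header to check for a VXLAN tunnel */
	if (l4.udp->dest == htons(IANA_VXLAN_PORT))
		/* ... */;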

View File

@ -805,7 +805,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
old_desc_num, new_desc_num);
if (if_running)
dev_close(ndev);
ndev->netdev_ops->ndo_stop(ndev);
ret = hns3_uninit_all_ring(priv);
if (ret)
@ -822,7 +822,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
if (if_running)
ret = dev_open(ndev, NULL);
ret = ndev->netdev_ops->ndo_open(ndev);
return ret;
}
@ -1115,6 +1115,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_channels = hns3_get_channels,
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.get_link = hns3_get_link,
};

View File

@ -93,13 +93,11 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
}
}
for (i = 0; i < hdev->num_alloc_vport; i++) {
if (num_tc > hdev->vport[i].alloc_tqps) {
dev_err(&hdev->pdev->dev,
"allocated tqp(%u) checking failed, %u > tqp(%u)\n",
i, num_tc, hdev->vport[i].alloc_tqps);
return -EINVAL;
}
if (num_tc > hdev->vport[0].alloc_tqps) {
dev_err(&hdev->pdev->dev,
"allocated tqp checking failed, %u > tqp(%u)\n",
num_tc, hdev->vport[0].alloc_tqps);
return -EINVAL;
}
return 0;

View File

@ -295,6 +295,14 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
static const u8 hclge_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
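This byte sequence appears to be the well-known default Toeplitz RSS key shipped by many NIC drivers; using it instead of netdev_rss_key_fill() (see the hclge_rss_init_cfg() hunk below) makes queue placement reproducible across reboots and identical for PF and VF. As a rough illustration of how such a key is consumed, a userspace-style sketch of the Toeplitz hash (toeplitz_hash is a hypothetical helper, not driver code; the hardware computes this internally):

#include <stdint.h>
#include <stddef.h>

/* Toeplitz hash as used for RSS queue selection. 'key' must provide
 * len + 4 bytes; a 40-byte key covers the largest standard input
 * tuple (36 bytes: IPv6 src/dst plus L4 ports). */
static uint32_t toeplitz_hash(const uint8_t *key, const uint8_t *in, size_t len)
{
	/* 32-bit window starting at the left edge of the key */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8) | (uint32_t)key[3];
	uint32_t hash = 0;

	for (size_t i = 0; i < len; i++) {
		for (int bit = 7; bit >= 0; bit--) {
			if ((in[i] >> bit) & 1)
				hash ^= window;
			/* slide the key window left by one bit */
			window = (window << 1) | ((key[i + 4] >> bit) & 1);
		}
	}
	return hash;
}

Hashing the same flow tuple with the same key always selects the same queue, which is what the fixed key buys over a random per-boot one.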
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@ -999,6 +1007,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
if (hnae3_dev_fd_supported(hdev))
hdev->fd_en = true;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
@ -3652,8 +3663,11 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
int i;
if (hdev->pdev->revision >= 0x21)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport[i].rss_tuple_sets.ipv4_tcp_en =
@ -3673,9 +3687,10 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
vport[i].rss_algo = rss_algo;
netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
@ -3961,7 +3976,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
hdev->fd_cfg.fd_en = true;
hdev->fd_cfg.proto_support =
TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
@ -4719,7 +4733,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
if (!hdev->fd_cfg.fd_en) {
if (!hdev->fd_en) {
dev_warn(&hdev->pdev->dev,
"Please enable flow director first\n");
return -EOPNOTSUPP;
@ -4872,7 +4886,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
return 0;
/* if fd is disabled, should not restore it when reset */
if (!hdev->fd_cfg.fd_en)
if (!hdev->fd_en)
return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
@ -5158,7 +5172,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
hdev->fd_cfg.fd_en = enable;
hdev->fd_en = enable;
if (!enable)
hclge_del_all_fd_entries(handle, false);
else
@ -7050,16 +7064,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
return hclge_mac_connect_phy(hdev);
}
static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
hclge_mac_disconnect_phy(hdev);
}
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@ -7079,13 +7083,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
goto clear_nic;
ret = hclge_init_instance_hw(hdev);
if (ret) {
client->ops->uninit_instance(&vport->nic,
0);
goto clear_nic;
}
hnae3_set_client_init_flag(client, ae_dev, 1);
if (hdev->roce_client &&
@ -7170,7 +7167,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (hdev->nic_client && client->ops->uninit_instance) {
hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@ -7389,7 +7385,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_umv_space(hdev);
if (ret) {
dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
goto err_msi_irq_uninit;
goto err_mdiobus_unreg;
}
ret = hclge_mac_init(hdev);
@ -8076,6 +8072,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_gro_en = hclge_gro_en,
.get_global_queue_id = hclge_covert_handle_qid_global,
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {

View File

@ -583,7 +583,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u8 fd_en;
u16 max_key_length;
u32 proto_support;
u32 rule_num[2]; /* rule entry number */
@ -758,6 +757,7 @@ struct hclge_dev {
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
u16 hclge_fd_rule_num;
u8 fd_en;
u16 wanted_umv_size;
/* max available unicast mac vlan space */

View File

@ -319,10 +319,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
struct hclge_dev *hdev = vport->back;
int ret;
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u8 vf_tc_map = 0;
int i, ret;
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
for (i = 0; i < kinfo->num_tc; i++)
vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8));
return ret;
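This mailbox hunk stops reporting the PF-wide hdev->hw_tc_map to VFs and instead builds a map from the TCs actually assigned to that VF's kinfo. A hypothetical worked example (values illustrative, tied to the TM change below that caps VFs at one TC):

	/* Assuming the PF runs 4 TCs (hw_tc_map = 0x0F) while the VF's
	 * kinfo->num_tc was capped at 1:
	 *
	 *   old reply: hw_tc_map = 0b1111  -> VF believes it owns 4 TCs
	 *   new reply: vf_tc_map = BIT(0)  -> VF sees its single TC
	 */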

View File

@ -195,8 +195,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_connect_phy(struct hclge_dev *hdev)
int hclge_mac_connect_phy(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@ -229,8 +231,10 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return 0;
}
void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
if (!phydev)

View File

@ -5,8 +5,8 @@
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hclge_dev *hdev);
void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);

View File

@ -520,8 +520,14 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
u16 max_rss_size;
u8 i;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
kinfo->num_tc = vport->vport_id ? 1 :
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
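Since every VF is now limited to a single TC, the new qs_offset formula packs queue sets as PF first (one per TC), then one per VF. A hypothetical worked example, assuming hdev->tm_info.num_tc == 4:

	/* vport_id 0 (PF):  qs_offset = 0               -> queue sets 0..3 (4 TCs)
	 * vport_id 1 (VF1): qs_offset = 4 + (1 - 1) = 4 -> queue set 4 (1 TC)
	 * vport_id 2 (VF2): qs_offset = 4 + (2 - 1) = 5 -> queue set 5 (1 TC)
	 */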
@ -538,12 +544,12 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
}
kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i)) {
if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
kinfo->tc_info[i].enable = true;
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
@ -766,13 +772,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, one by one mapping */
for (k = 0; k < hdev->num_alloc_vport; k++)
for (i = 0; i < hdev->tm_info.num_tc; i++) {
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hnae3_knic_private_info *kinfo =
&vport[k].nic.kinfo;
for (i = 0; i < kinfo->num_tc; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(
hdev, vport[k].qs_offset + i, i);
if (ret)
return ret;
}
}
} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
for (k = 0; k < hdev->num_alloc_vport; k++)

View File

@ -21,6 +21,14 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
static const u8 hclgevf_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
@ -78,7 +86,12 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle)
{
return container_of(handle, struct hclgevf_dev, nic);
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
else if (handle->client->type == HNAE3_CLIENT_ROCE)
return container_of(handle, struct hclgevf_dev, roce);
else
return container_of(handle, struct hclgevf_dev, nic);
}
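For context: container_of() subtracts the offset of the named member, so it only recovers a valid hclgevf_dev when the member matches the handle that was actually embedded. A sketch of why the RoCE case needs its own branch (struct abridged, field order illustrative):

struct hclgevf_dev {
	/* ... */
	struct hnae3_handle nic;	/* handle handed to the NIC client  */
	struct hnae3_handle roce;	/* handle handed to the RoCE client */
};

/* Using container_of(handle, struct hclgevf_dev, nic) on a RoCE handle
 * would yield a pointer off by the distance between the two members. */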
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
@ -1611,6 +1624,10 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
int ret;
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
0, false, &respmsg, sizeof(u8));
if (ret)
@ -1789,9 +1806,9 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->rss_size_max;
if (hdev->pdev->revision >= 0x21) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
netdev_rss_key_fill(rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);