Merge branch 'hns3-a-few-code-improvements'
Peng Li says:

====================
net: hns3: a few code improvements

This patchset removes some redundant code and fixes a few code stylistic
issues from internal concentrated review; no functional changes are
introduced.

---
Change log:
V1 -> V2:
1, remove a patch according to the comment reported by David Miller.
---
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit f6779e4e53
drivers/net/ethernet/hisilicon/hns3/Kconfig

@@ -85,10 +85,12 @@ config HNS3
 	  drivers(like ODP)to register with HNAE devices and their associated
 	  operations.
 
+if HNS3
+
 config HNS3_HCLGE
 	tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+	default m
 	depends on PCI_MSI
-	depends on HNS3
 	---help---
 	  This selects the HNS3_HCLGE network acceleration engine & its hardware
 	  compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,7 +99,7 @@ config HNS3_HCLGE
 config HNS3_DCB
 	bool "Hisilicon HNS3 Data Center Bridge Support"
 	default n
-	depends on HNS3 && HNS3_HCLGE && DCB
+	depends on HNS3_HCLGE && DCB
 	---help---
 	  Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
 
@@ -106,7 +108,6 @@ config HNS3_DCB
 config HNS3_HCLGEVF
 	tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
 	depends on PCI_MSI
-	depends on HNS3
 	depends on HNS3_HCLGE
 	---help---
 	  This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
 
 config HNS3_ENET
 	tristate "Hisilicon HNS3 Ethernet Device Support"
+	default m
 	depends on 64BIT && PCI
-	depends on HNS3
 	---help---
 	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
 	  family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
 	  devices and their associated operations.
 
+endif #HNS3
+
 endif # NET_VENDOR_HISILICON
drivers/net/ethernet/hisilicon/hns3/hnae3.c

@@ -40,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
 {
 	switch (client->type) {
 	case HNAE3_CLIENT_KNIC:
-		hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
 		break;
 	case HNAE3_CLIENT_UNIC:
-		hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+		hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
 		break;
 	case HNAE3_CLIENT_ROCE:
-		hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+		hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
 		break;
 	default:
 		break;
@@ -60,15 +60,15 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
 
 	switch (client->type) {
 	case HNAE3_CLIENT_KNIC:
-		inited = hnae_get_bit(ae_dev->flag,
+		inited = hnae3_get_bit(ae_dev->flag,
 				      HNAE3_KNIC_CLIENT_INITED_B);
 		break;
 	case HNAE3_CLIENT_UNIC:
-		inited = hnae_get_bit(ae_dev->flag,
+		inited = hnae3_get_bit(ae_dev->flag,
 				      HNAE3_UNIC_CLIENT_INITED_B);
 		break;
 	case HNAE3_CLIENT_ROCE:
-		inited = hnae_get_bit(ae_dev->flag,
+		inited = hnae3_get_bit(ae_dev->flag,
 				      HNAE3_ROCE_CLIENT_INITED_B);
 		break;
 	default:
@@ -85,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
 
 	/* check if this client matches the type of ae_dev */
 	if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
-	      hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
 		return 0;
 	}
 
@@ -190,7 +190,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 			continue;
 		}
 
-		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 
 		/* check the client list for the match with this ae_dev type and
 		 * initialize the figure out client instance
@@ -220,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_dev */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-		if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
 			continue;
 
 		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -234,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 			hnae3_match_n_instantiate(client, ae_dev, false);
 
 		ae_algo->ops->uninit_ae_dev(ae_dev);
-		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
 	}
 
 	list_del(&ae_algo->node);
@@ -278,7 +278,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 			goto out_err;
 		}
 
-		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 		break;
 	}
 
@@ -310,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_algo */
 	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
-		if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
 			continue;
 
 		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -321,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 			hnae3_match_n_instantiate(client, ae_dev, false);
 
 		ae_algo->ops->uninit_ae_dev(ae_dev);
-		hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
 	}
 
 	list_del(&ae_dev->node);
drivers/net/ethernet/hisilicon/hns3/hnae3.h

@@ -62,10 +62,10 @@
 			 BIT(HNAE3_DEV_SUPPORT_ROCE_B))
 
 #define hnae3_dev_roce_supported(hdev) \
-	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
 
 #define hnae3_dev_dcb_supported(hdev) \
-	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 
 #define ring_ptr_move_fw(ring, p) \
 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +167,6 @@ struct hnae3_client_ops {
 #define HNAE3_CLIENT_NAME_LENGTH 16
 struct hnae3_client {
 	char name[HNAE3_CLIENT_NAME_LENGTH];
-	u16 version;
 	unsigned long state;
 	enum hnae3_client_type type;
 	const struct hnae3_client_ops *ops;
@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
 struct hnae3_ae_algo {
 	const struct hnae3_ae_ops *ops;
 	struct list_head node;
-	char name[HNAE3_CLASS_NAME_SIZE];
 	const struct pci_device_id *pdev_id_table;
 };
 
@@ -509,17 +507,17 @@ struct hnae3_handle {
 	u32 numa_node_mask;	/* for multi-chip support */
 };
 
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
 	do { \
 		(origin) &= (~(mask)); \
 		(origin) |= ((val) << (shift)) & (mask); \
 	} while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
 
-#define hnae_set_bit(origin, shift, val) \
-	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
-	hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+	hnae3_get_field((origin), (0x1 << (shift)), (shift))
 
 void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
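The renamed helpers above are plain mask-and-shift accessors: hnae3_set_field() clears the masked bits of an integer lvalue and ORs in the shifted value, and hnae3_get_field() reverses that; the bit variants are just one-bit fields. A minimal standalone sketch of their behavior, with the macro bodies copied from the hunk above (the SPEED field placement is a made-up example, not a real register layout):

#include <stdio.h>
#include <stdint.h>

/* Macro bodies as defined in hnae3.h above; only the name prefix changed */
#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field((origin), (0x1 << (shift)), (shift))

int main(void)
{
	uint32_t flags = 0;

	/* hypothetical 4-bit field occupying bits 4..7 */
	const uint32_t SPEED_M = 0xf0;
	const int SPEED_S = 4;

	hnae3_set_field(flags, SPEED_M, SPEED_S, 6);	/* flags == 0x60 */
	hnae3_set_bit(flags, 0, 1);			/* flags == 0x61 */

	printf("field=%u bit=%u\n",
	       hnae3_get_field(flags, SPEED_M, SPEED_S),	/* prints 6 */
	       hnae3_get_bit(flags, 0));			/* prints 1 */
	return 0;
}

Because set_field masks the shifted value before ORing it in, an oversized value cannot corrupt neighboring fields, which is why the driver can layer many fields into one descriptor word safely.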
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -493,7 +493,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 
 	/* find the txbd field values */
 	*paylen = skb->len - hdr_len;
-	hnae_set_bit(*type_cs_vlan_tso,
+	hnae3_set_bit(*type_cs_vlan_tso,
 		     HNS3_TXD_TSO_B, 1);
 
 	/* get MSS for TSO */
@@ -586,20 +586,20 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
 	/* compute L2 header size for normal packet, defined in 2 Bytes */
 	l2_len = l3.hdr - skb->data;
-	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 		       HNS3_TXD_L2LEN_S, l2_len >> 1);
 
 	/* tunnel packet*/
 	if (skb->encapsulation) {
 		/* compute OL2 header size, defined in 2 Bytes */
 		ol2_len = l2_len;
-		hnae_set_field(*ol_type_vlan_len_msec,
+		hnae3_set_field(*ol_type_vlan_len_msec,
 			       HNS3_TXD_L2LEN_M,
 			       HNS3_TXD_L2LEN_S, ol2_len >> 1);
 
 		/* compute OL3 header size, defined in 4 Bytes */
 		ol3_len = l4.hdr - l3.hdr;
-		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
 			       HNS3_TXD_L3LEN_S, ol3_len >> 2);
 
 		/* MAC in UDP, MAC in GRE (0x6558)*/
@@ -609,15 +609,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
 		/* compute OL4 header size, defined in 4 Bytes. */
 		ol4_len = l2_hdr - l4.hdr;
-		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
-			       HNS3_TXD_L4LEN_S, ol4_len >> 2);
+		hnae3_set_field(*ol_type_vlan_len_msec,
+				HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+				ol4_len >> 2);
 
 		/* switch IP header ptr from outer to inner header */
 		l3.hdr = skb_inner_network_header(skb);
 
 		/* compute inner l2 header size, defined in 2 Bytes. */
 		l2_len = l3.hdr - l2_hdr;
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 			       HNS3_TXD_L2LEN_S, l2_len >> 1);
 	} else {
 		/* skb packet types not supported by hardware,
@@ -634,22 +635,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
 	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
 	l3_len = l4.hdr - l3.hdr;
-	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
 		       HNS3_TXD_L3LEN_S, l3_len >> 2);
 
 	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
 	switch (l4_proto) {
 	case IPPROTO_TCP:
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
 		break;
 	case IPPROTO_SCTP:
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+				HNS3_TXD_L4LEN_S,
+				(sizeof(struct sctphdr) >> 2));
 		break;
 	case IPPROTO_UDP:
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+				HNS3_TXD_L4LEN_S,
+				(sizeof(struct udphdr) >> 2));
 		break;
 	default:
 		/* skb packet types not supported by hardware,
@@ -703,29 +706,31 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 		/* define outer network header type.*/
 		if (skb->protocol == htons(ETH_P_IP)) {
 			if (skb_is_gso(skb))
-				hnae_set_field(*ol_type_vlan_len_msec,
-					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+				hnae3_set_field(*ol_type_vlan_len_msec,
+						HNS3_TXD_OL3T_M,
+						HNS3_TXD_OL3T_S,
 					       HNS3_OL3T_IPV4_CSUM);
 			else
-				hnae_set_field(*ol_type_vlan_len_msec,
-					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+				hnae3_set_field(*ol_type_vlan_len_msec,
+						HNS3_TXD_OL3T_M,
+						HNS3_TXD_OL3T_S,
 					       HNS3_OL3T_IPV4_NO_CSUM);
 
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
-			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
 				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
 		}
 
 		/* define tunnel type(OL4).*/
 		switch (l4_proto) {
 		case IPPROTO_UDP:
-			hnae_set_field(*ol_type_vlan_len_msec,
+			hnae3_set_field(*ol_type_vlan_len_msec,
 				       HNS3_TXD_TUNTYPE_M,
 				       HNS3_TXD_TUNTYPE_S,
 				       HNS3_TUN_MAC_IN_UDP);
 			break;
 		case IPPROTO_GRE:
-			hnae_set_field(*ol_type_vlan_len_msec,
+			hnae3_set_field(*ol_type_vlan_len_msec,
 				       HNS3_TXD_TUNTYPE_M,
 				       HNS3_TXD_TUNTYPE_S,
 				       HNS3_TUN_NVGRE);
@@ -749,25 +754,25 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 	}
 
 	if (l3.v4->version == 4) {
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
 		if (skb_is_gso(skb))
-			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
+			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
 
-		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 	} else if (l3.v6->version == 6) {
-		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
-		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 	}
 
 	switch (l4_proto) {
 	case IPPROTO_TCP:
-		hnae_set_field(*type_cs_vlan_tso,
+		hnae3_set_field(*type_cs_vlan_tso,
 			       HNS3_TXD_L4T_M,
 			       HNS3_TXD_L4T_S,
 			       HNS3_L4T_TCP);
@@ -776,13 +781,13 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 		if (hns3_tunnel_csum_bug(skb))
 			break;
 
-		hnae_set_field(*type_cs_vlan_tso,
+		hnae3_set_field(*type_cs_vlan_tso,
 			       HNS3_TXD_L4T_M,
 			       HNS3_TXD_L4T_S,
 			       HNS3_L4T_UDP);
 		break;
 	case IPPROTO_SCTP:
-		hnae_set_field(*type_cs_vlan_tso,
+		hnae3_set_field(*type_cs_vlan_tso,
 			       HNS3_TXD_L4T_M,
 			       HNS3_TXD_L4T_S,
 			       HNS3_L4T_SCTP);
@@ -807,11 +812,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 {
 	/* Config bd buffer end */
-	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
 		       HNS3_TXD_BDTYPE_S, 0);
-	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
-	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
-	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
 static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +849,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 		 * and use inner_vtag in one tag case.
 		 */
 		if (skb->protocol == htons(ETH_P_8021Q)) {
-			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
 			*out_vtag = vlan_tag;
 		} else {
-			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
 			*inner_vtag = vlan_tag;
 		}
 	} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -1135,7 +1140,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	wmb(); /* Commit all data before submit */
 
-	hnae_queue_xmit(ring->tqp, buf_num);
+	hnae3_queue_xmit(ring->tqp, buf_num);
 
 	return NETDEV_TX_OK;
 
@@ -1703,7 +1708,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 			     struct hns3_desc_cb *cb)
 {
-	unsigned int order = hnae_page_order(ring);
+	unsigned int order = hnae3_page_order(ring);
 	struct page *p;
 
 	p = dev_alloc_pages(order);
@@ -1714,7 +1719,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	cb->page_offset = 0;
 	cb->reuse_flag = 0;
 	cb->buf = page_address(p);
-	cb->length = hnae_page_size(ring);
+	cb->length = hnae3_page_size(ring);
 	cb->type = DESC_TYPE_PAGE;
 
 	return 0;
@@ -1780,33 +1785,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
 /* free desc along with its attached buffer */
 static void hns3_free_desc(struct hns3_enet_ring *ring)
 {
+	int size = ring->desc_num * sizeof(ring->desc[0]);
+
 	hns3_free_buffers(ring);
 
-	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
-			 ring->desc_num * sizeof(ring->desc[0]),
-			 DMA_BIDIRECTIONAL);
-	ring->desc_dma_addr = 0;
-	kfree(ring->desc);
+	if (ring->desc) {
+		dma_free_coherent(ring_to_dev(ring), size,
+				  ring->desc, ring->desc_dma_addr);
+		ring->desc = NULL;
+	}
 }
 
 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 {
 	int size = ring->desc_num * sizeof(ring->desc[0]);
 
-	ring->desc = kzalloc(size, GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+					 &ring->desc_dma_addr,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
-	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
-					     size, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
-		ring->desc_dma_addr = 0;
-		kfree(ring->desc);
-		ring->desc = NULL;
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -1887,7 +1886,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 
 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 	(*bytes) += desc_cb->length;
-	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+	/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
 	hns3_free_buffer_detach(ring, ring->next_to_clean);
 
 	ring_ptr_move_fw(ring, next_to_clean);
@@ -2016,15 +2015,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	bool twobufs;
 
 	twobufs = ((PAGE_SIZE < 8192) &&
-		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
 
 	desc = &ring->desc[ring->next_to_clean];
 	size = le16_to_cpu(desc->rx.size);
 
-	truesize = hnae_buf_size(ring);
+	truesize = hnae3_buf_size(ring);
 
 	if (!twobufs)
-		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
 
@@ -2076,13 +2075,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 		return;
 
 	/* check if hardware has done checksum */
-	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
 		return;
 
-	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
-		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
-		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
-		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+		     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+		     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+		     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
 		netdev_err(netdev, "L3/L4 error pkt\n");
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.l3l4_csum_err++;
@@ -2091,12 +2090,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 		return;
 	}
 
-	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
+	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
 				 HNS3_RXD_L3ID_S);
-	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
+	l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
 				 HNS3_RXD_L4ID_S);
 
-	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+				   HNS3_RXD_OL4ID_S);
 	switch (ol4_type) {
 	case HNS3_OL4_TYPE_MAC_IN_UDP:
 	case HNS3_OL4_TYPE_NVGRE:
@@ -2135,7 +2135,7 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 #define HNS3_STRP_OUTER_VLAN	0x1
 #define HNS3_STRP_INNER_VLAN	0x2
 
-	switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
 			       HNS3_RXD_STRP_TAGP_S)) {
 	case HNS3_STRP_OUTER_VLAN:
 		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
@@ -2174,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
 
 	/* Check valid BD */
-	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
+	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
 		return -EFAULT;
 
 	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2229,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
 		ring_ptr_move_fw(ring, next_to_clean);
 
-		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+		while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
 			desc = &ring->desc[ring->next_to_clean];
 			desc_cb = &ring->desc_cb[ring->next_to_clean];
 			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2257,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 					       vlan_tag);
 	}
 
-	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
 		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
 			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
 		u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2269,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	}
 
 	if (unlikely((!desc->rx.pkt_len) ||
-		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+		     hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
 		netdev_err(netdev, "truncated pkt\n");
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.err_pkt_len++;
@@ -2279,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 		return -EFAULT;
 	}
 
-	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
 		netdev_err(netdev, "L2 error pkt\n");
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.l2_err++;
@@ -2532,9 +2532,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 	tx_ring = tqp_vector->tx_group.ring;
 	if (tx_ring) {
 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
-		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_TX);
-		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
 
 		cur_chain->next = NULL;
 
@@ -2549,9 +2549,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 
 			cur_chain->next = chain;
 			chain->tqp_index = tx_ring->tqp->tqp_index;
-			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 				     HNAE3_RING_TYPE_TX);
-			hnae_set_field(chain->int_gl_idx,
+			hnae3_set_field(chain->int_gl_idx,
 				       HNAE3_RING_GL_IDX_M,
 				       HNAE3_RING_GL_IDX_S,
 				       HNAE3_RING_GL_TX);
@@ -2564,9 +2564,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 	if (!tx_ring && rx_ring) {
 		cur_chain->next = NULL;
 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
-		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_RX);
-		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
 		rx_ring = rx_ring->next;
 
@@ -2579,9 +2579,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 
 		cur_chain->next = chain;
 		chain->tqp_index = rx_ring->tqp->tqp_index;
-		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_RX);
-		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
 		cur_chain = chain;
 
@@ -2805,7 +2805,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 		ring->io_base = q->io_base;
 	}
 
-	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
 
 	ring->tqp = q;
 	ring->desc = NULL;
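The hns3_alloc_desc()/hns3_free_desc() hunks above convert the descriptor ring from a kzalloc() plus dma_map_single() pair to a single coherent allocation, which removes the dma_mapping_error() failure path entirely and drops the need for sync operations on memory that both the CPU and the NIC touch constantly. A hedged kernel-style sketch of the before/after pairing (the helper names here are illustrative, not the driver's; dma_zalloc_coherent() was the in-tree API at the time of this commit and was later folded into dma_alloc_coherent(), which now zeroes by default):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Before: two steps, two failure points, explicit unmap + kfree on teardown */
static void *ring_alloc_streaming(struct device *dev, size_t size,
				  dma_addr_t *dma)
{
	void *desc = kzalloc(size, GFP_KERNEL);

	if (!desc)
		return NULL;
	*dma = dma_map_single(dev, desc, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma)) {
		kfree(desc);
		return NULL;
	}
	return desc;
}

/* After: one zeroed, coherent allocation, released with dma_free_coherent() */
static void *ring_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}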
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h

@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {
 
 	u16 num_tqps;	/* total number of tqps in TQP vector */
 
-	cpumask_t affinity_mask;
 	char name[HNAE3_INT_NAME_LEN];
 
 	/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define hns3_write_dev(a, reg, value) \
 	hns3_write_reg((a)->io_base, (reg), (value))
 
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
 		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
 
 #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
 
@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
 
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
 
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c

@@ -18,8 +18,7 @@
 #include "hclge_main.h"
 
 #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
-	DMA_TO_DEVICE : DMA_FROM_DEVICE)
 
 #define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
 
 static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,32 +45,25 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclge_desc);
 
-	ring->desc = kzalloc(size, GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+					 size, &ring->desc_dma_addr,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
-	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
-					     size, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
-		ring->desc_dma_addr = 0;
-		kfree(ring->desc);
-		ring->desc = NULL;
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
 {
-	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
-			 ring->desc_num * sizeof(ring->desc[0]),
-			 DMA_BIDIRECTIONAL);
+	int size = ring->desc_num * sizeof(struct hclge_desc);
 
-	ring->desc_dma_addr = 0;
-	kfree(ring->desc);
+	if (ring->desc) {
+		dma_free_coherent(cmq_ring_to_dev(ring), size,
+				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
+	}
 }
 
 static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
 {
@@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
 
 	if (is_read)
 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-	else
-		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
 }
 
 static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -154,31 +144,20 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
 	struct hclge_cmq_ring *csq = &hw->cmq.csq;
-	u16 ntc = csq->next_to_clean;
-	struct hclge_desc *desc;
-	int clean = 0;
 	u32 head;
+	int clean;
 
-	desc = &csq->desc[ntc];
 	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
 	rmb(); /* Make sure head is ready before touch any data */
 
 	if (!is_valid_csq_clean_head(csq, head)) {
-		dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
+		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
 			 csq->next_to_use, csq->next_to_clean);
 		return 0;
 	}
 
-	while (head != ntc) {
-		memset(desc, 0, sizeof(*desc));
-		ntc++;
-		if (ntc == csq->desc_num)
-			ntc = 0;
-		desc = &csq->desc[ntc];
-		clean++;
-	}
-	csq->next_to_clean = ntc;
-
+	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+	csq->next_to_clean = head;
 	return clean;
 }
 
@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	 */
 	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
 		do {
-			if (hclge_cmd_csq_done(hw))
+			if (hclge_cmd_csq_done(hw)) {
+				complete = true;
 				break;
+			}
 			udelay(1);
 			timeout++;
 		} while (timeout < hw->cmq.tx_timeout);
 	}
 
-	if (hclge_cmd_csq_done(hw)) {
-		complete = true;
+	if (!complete) {
+		retval = -EAGAIN;
+	} else {
 		handle = 0;
 		while (handle < num) {
 			/* Get the result of hardware write back */
 			desc_to_use = &hw->cmq.csq.desc[ntc];
 			desc[handle] = *desc_to_use;
 			pr_debug("Get cmd desc:\n");
 
 			if (likely(!hclge_is_special_opcode(opcode)))
 				desc_ret = le16_to_cpu(desc[handle].retval);
 			else
 				desc_ret = le16_to_cpu(desc[0].retval);
 
-			if ((enum hclge_cmd_return_status)desc_ret ==
-			    HCLGE_CMD_EXEC_SUCCESS)
+			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
 				retval = 0;
 			else
 				retval = -EIO;
-			hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+			hw->cmq.last_status = desc_ret;
 			ntc++;
 			handle++;
 			if (ntc == hw->cmq.csq.desc_num)
@@ -290,9 +270,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 		}
 	}
 
-	if (!complete)
-		retval = -EAGAIN;
-
 	/* Clean the command send queue */
 	handle = hclge_cmd_csq_clean(hw);
 	if (handle != num) {
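The hclge_cmd_csq_clean() hunk replaces a walk-and-count loop with one modular expression. Since next_to_clean and head both live in [0, desc_num), the forward distance from next_to_clean to head, wrapping at desc_num, is exactly the number of descriptors the removed loop used to count one by one. A small runnable check of that equivalence (the desc_num values here are arbitrary):

#include <stdio.h>

/* Number of ring entries cleaned when advancing ntc forward to head. */
static int clean_count(int head, int ntc, int desc_num)
{
	return (head - ntc + desc_num) % desc_num;
}

int main(void)
{
	/* no wrap: head is ahead of ntc */
	printf("%d\n", clean_count(7, 3, 1024));	/* prints 4 */
	/* wrap: head has already wrapped past the end of the ring */
	printf("%d\n", clean_count(2, 1020, 1024));	/* prints 6 */
	/* nothing to clean */
	printf("%d\n", clean_count(5, 5, 1024));	/* prints 0 */
	return 0;
}

Dropping the loop also drops the per-descriptor memset(); the descriptors are fully rewritten before reuse anyway, so only the counters need to move.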
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

@@ -939,7 +939,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 
 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->num_roce_msi =
-		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
 		/* PF should have NIC vectors and Roce vectors,
@@ -948,7 +948,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
 	} else {
 		hdev->num_msi =
-		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 	}
 
@@ -1038,36 +1038,36 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
 
 	/* get the configuration */
-	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 					     HCLGE_CFG_VMDQ_M,
 					     HCLGE_CFG_VMDQ_S);
-	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
-	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 					   HCLGE_CFG_TQP_DESC_N_M,
 					   HCLGE_CFG_TQP_DESC_N_S);
 
-	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
+	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
 				       HCLGE_CFG_PHY_ADDR_M,
 				       HCLGE_CFG_PHY_ADDR_S);
-	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
+	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
 					 HCLGE_CFG_MEDIA_TP_M,
 					 HCLGE_CFG_MEDIA_TP_S);
-	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
+	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
 					 HCLGE_CFG_RX_BUF_LEN_M,
 					 HCLGE_CFG_RX_BUF_LEN_S);
 	/* get mac_address */
 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
-	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
+	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
 					   HCLGE_CFG_MAC_ADDR_H_M,
 					   HCLGE_CFG_MAC_ADDR_H_S);
 
 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
 
-	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
+	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
 					    HCLGE_CFG_DEFAULT_SPEED_M,
 					    HCLGE_CFG_DEFAULT_SPEED_S);
-	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
 					   HCLGE_CFG_RSS_SIZE_M,
 					   HCLGE_CFG_RSS_SIZE_S);
 
@@ -1077,7 +1077,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
 
-	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
+	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
 					    HCLGE_CFG_SPEED_ABILITY_M,
 					    HCLGE_CFG_SPEED_ABILITY_S);
 }
@@ -1098,10 +1098,10 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
 					   true);
-		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
+		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
 			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
 		/* Len should be united by 4 bytes when send to hardware */
-		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
 			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
 		req->offset = cpu_to_le32(offset);
 	}
@@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	/* Currently not support uncontiuous tc */
 	for (i = 0; i < hdev->tm_info.num_tc; i++)
-		hnae_set_bit(hdev->hw_tc_map, i, 1);
+		hnae3_set_bit(hdev->hw_tc_map, i, 1);
 
 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
@@ -1208,12 +1208,12 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
 
 	tso_mss = 0;
-	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
 		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
 	req->tso_mss_min = cpu_to_le16(tso_mss);
 
 	tso_mss = 0;
-	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
 		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
 	req->tso_mss_max = cpu_to_le16(tso_mss);
 
@@ -2118,39 +2118,39 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
 
-	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
 
 	switch (speed) {
 	case HCLGE_MAC_SPEED_10M:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 6);
 		break;
 	case HCLGE_MAC_SPEED_100M:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 7);
 		break;
 	case HCLGE_MAC_SPEED_1G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 0);
 		break;
 	case HCLGE_MAC_SPEED_10G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 1);
 		break;
 	case HCLGE_MAC_SPEED_25G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 2);
 		break;
 	case HCLGE_MAC_SPEED_40G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 3);
 		break;
 	case HCLGE_MAC_SPEED_50G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 4);
 		break;
 	case HCLGE_MAC_SPEED_100G:
-		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 			       HCLGE_CFG_SPEED_S, 5);
 		break;
 	default:
@@ -2158,7 +2158,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 		return -EINVAL;
 	}
 
-	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
 		     1);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2201,8 +2201,8 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
 		return ret;
 	}
 
-	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
-	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
 				   HCLGE_QUERY_SPEED_S);
 
 	ret = hclge_parse_speed(speed_tmp, speed);
@@ -2225,7 +2225,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
 
 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
-	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2269,7 +2269,7 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
 	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
 
-	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+	hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
 		     mask_vlan ? 1 : 0);
 	ether_addr_copy(req->mac_mask, mac_mask);
 
@@ -2711,7 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 	}
 
 	val = hclge_read_dev(&hdev->hw, reg);
-	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
 		msleep(HCLGE_RESET_WATI_MS);
 		val = hclge_read_dev(&hdev->hw, reg);
 		cnt++;
@@ -2733,8 +2733,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 	int ret;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
-	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
-	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
 	req->fun_reset_vfid = func_id;
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2753,13 +2752,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 	switch (hdev->reset_type) {
 	case HNAE3_GLOBAL_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
 		dev_info(&pdev->dev, "Global Reset requested\n");
 		break;
 	case HNAE3_CORE_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
 		dev_info(&pdev->dev, "Core Reset requested\n");
 		break;
@@ -3116,10 +3115,10 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		u16 mode = 0;
 
-		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
-		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
 			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
-		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
 			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
 
 		req->rss_tc_mode[i] = cpu_to_le16(mode);
@@ -3497,14 +3496,14 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
 	i = 0;
 	for (node = ring_chain; node; node = node->next) {
 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
-		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
 			       HCLGE_INT_TYPE_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+			       hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
 			       HCLGE_TQP_ID_S, node->tqp_index);
-		hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
 			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_field(node->int_gl_idx,
+			       hnae3_get_field(node->int_gl_idx,
 					      HNAE3_RING_GL_IDX_M,
 					      HNAE3_RING_GL_IDX_S));
 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
@@ -3654,20 +3653,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 	int ret;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
-	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
-	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
-	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
-	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
-	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
-	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3695,7 +3694,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
 
 	/* 2 Then setup the loopback flag */
 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
-	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
 
 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
@@ -3959,9 +3958,9 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
 	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
 
-	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
 		     enable);
-	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
 		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3986,7 +3985,7 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
 	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
 
-	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
 		     enable);
 	req->function_id = func_id;
 
@@ -4013,9 +4012,9 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
 
 	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
-	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
 
-	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
 		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
 	req->item_idx = cpu_to_le16(item_idx);
 
@@ -4263,17 +4262,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
-	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
 
-	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
-	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
-	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
 		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
-	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
-		       HCLGE_MAC_EPORT_PFID_S, 0);
 
 	req.egress_port = cpu_to_le16(egress_port);
 
@@ -4324,8 +4316,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
 	hclge_prepare_mac_addr(&req, addr);
 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
 
@@ -4357,10 +4349,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
 		return -EINVAL;
 	}
 	memset(&req, 0, sizeof(req));
-	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
 	hclge_prepare_mac_addr(&req, addr);
 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
 	if (!status) {
@@ -4424,10 +4416,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
 	hclge_prepare_mac_addr(&req, addr);
 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
 	if (!status) {
@@ -4808,19 +4800,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
 		     vcfg->accept_tag1 ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
 		     vcfg->accept_untag1 ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
 		     vcfg->accept_tag2 ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
 		     vcfg->accept_untag2 ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
 		     vcfg->insert_tag1_en ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
 		     vcfg->insert_tag2_en ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
 
 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
 	req->vf_bitmap[req->vf_offset] =
@@ -4846,13 +4838,13 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
 
 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
 		     vcfg->strip_tag1_en ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
 		     vcfg->strip_tag2_en ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
 		     vcfg->vlan1_vlan_prionly ? 1 : 0);
-	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
 		     vcfg->vlan2_vlan_prionly ? 1 : 0);
 
 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
 
@@ -5049,7 +5041,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
 
 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
-	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
@@ -5079,7 +5071,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
 		return ret;
 	}
 
-	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5386,12 +5378,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
 
 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
-	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
 				   HCLGE_PHY_MDIX_CTRL_S);
 
 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
-	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
-	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
 
 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
 
@@ -6164,7 +6156,7 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
 
 	req = (struct hclge_set_led_state_cmd *)desc.data;
-	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
 		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6295,7 +6287,6 @@ static const struct hnae3_ae_ops hclge_ops = {
 
 static struct hnae3_ae_algo ae_algo = {
 	.ops = &hclge_ops,
-	.name = HCLGE_NAME,
 	.pdev_id_table = ae_algo_pci_tbl,
 };
 
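Several of the MAC/VLAN table hunks above drop hnae3_set_bit(..., 0) calls that immediately follow memset(&req, 0, sizeof(req)): clearing a bit in freshly zeroed storage is a no-op, so only the bits set to 1 need to stay. A trivial runnable demonstration (the struct and field names are hypothetical, chosen only to mirror the pattern in the diff):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct req {
	uint8_t flags;
	uint8_t entry_type;
};

int main(void)
{
	struct req a, b;

	memset(&a, 0, sizeof(a));
	a.flags |= 1 << 0;		/* the only write that matters */

	memset(&b, 0, sizeof(b));
	b.flags |= 1 << 0;
	b.entry_type &= ~(1 << 0);	/* redundant: already zero */
	b.entry_type &= ~(1 << 1);	/* redundant: already zero */

	assert(memcmp(&a, &b, sizeof(a)) == 0);	/* identical results */
	return 0;
}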
@@ -128,10 +128,10 @@ static int hclge_get_ring_chain_from_mbx(
 					HCLGE_MBX_RING_NODE_VARIABLE_NUM))
 		return -ENOMEM;
 
-	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
 	ring_chain->tqp_index =
 			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
-	hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+	hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
 		       HCLGE_INT_GL_IDX_S,
 		       req->msg[5]);
 
@@ -142,7 +142,7 @@ static int hclge_get_ring_chain_from_mbx(
 		if (!new_chain)
 			goto err;
 
-		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
 			     req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
 			     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
 
@@ -151,7 +151,7 @@ static int hclge_get_ring_chain_from_mbx(
 			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
 			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
 
-		hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+		hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
 			       HCLGE_INT_GL_IDX_S,
 			       req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
 			       HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
@@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-		if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
 			dev_warn(&hdev->pdev->dev,
 				 "dropped invalid mailbox message, code = %d\n",
 				 req->msg[0]);
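Both mailbox handlers in this merge (the PF side here, the VF side near the end) guard against stale ring entries the same way: each received descriptor carries a hardware-set "output valid" flag bit, and anything without it is logged and dropped. Sketched generically (every name below is invented for illustration):

#include <linux/types.h>
#include <linux/bitops.h>

/* Hypothetical CRQ descriptor: bit 0 of 'flag' is assumed to be the
 * hardware-set "output valid" bit the handlers above test. */
struct demo_desc {
	__le16 flag;
	u8 msg[16];
};

#define DEMO_OUTVLD_B	0

/* Consume a ring entry only if the valid bit is set; otherwise it is
 * a stale or corrupt entry and gets skipped, as the dev_warn() path
 * above does. */
static bool demo_crq_entry_valid(const struct demo_desc *desc)
{
	return le16_to_cpu(desc->flag) & BIT(DEMO_OUTVLD_B);
}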
@@ -67,15 +67,15 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
 
 	mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
 
-	hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+	hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
 		       HCLGE_MDIO_PHYID_S, phyid);
-	hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+	hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
 		       HCLGE_MDIO_PHYREG_S, regnum);
 
-	hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
-	hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+	hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+	hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
 		       HCLGE_MDIO_CTRL_ST_S, 1);
-	hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+	hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
 		       HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
 
 	mdio_cmd->data_wr = cpu_to_le16(data);
@@ -105,15 +105,15 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 
 	mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
 
-	hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+	hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
 		       HCLGE_MDIO_PHYID_S, phyid);
-	hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+	hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
 		       HCLGE_MDIO_PHYREG_S, regnum);
 
-	hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
-	hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+	hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+	hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
 		       HCLGE_MDIO_CTRL_ST_S, 1);
-	hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+	hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
 		       HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
 
 	/* Read out phy data */
@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 		return ret;
 	}
 
-	if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+	if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
 		dev_err(&hdev->pdev->dev, "mdio read data error\n");
 		return -EIO;
 	}
@@ -1184,9 +1184,9 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
 		u16 qs_id = vport->qs_offset + tc;
 		u8 grp, sub_grp;
 
-		grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+		grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
 				     HCLGE_BP_GRP_ID_S);
-		sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+		sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
 					 HCLGE_BP_SUB_GRP_ID_S);
 		if (i == grp)
 			qs_bitmap |= (1 << sub_grp);
@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd {
 };
 
 #define hclge_tm_set_field(dest, string, val) \
-		       hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
+		       hnae3_set_field((dest), \
+				       (HCLGE_TM_SHAP_##string##_MSK), \
 				       (HCLGE_TM_SHAP_##string##_LSH), val)
 #define hclge_tm_get_field(src, string) \
-		       hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+		       hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
 				      (HCLGE_TM_SHAP_##string##_LSH))
 
 int hclge_tm_schd_init(struct hclge_dev *hdev);
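The hclge_tm_set_field()/hclge_tm_get_field() wrappers use token pasting to build the mask/shift macro names from a short field tag. A hand expansion (the BS_B field tag is assumed here purely for illustration):

/* hclge_tm_set_field(shap_cfg, BS_B, bs_b) expands, via ##-pasting, to:
 *
 *	hnae3_set_field((shap_cfg),
 *			(HCLGE_TM_SHAP_BS_B_MSK),
 *			(HCLGE_TM_SHAP_BS_B_LSH), bs_b);
 *
 * i.e. the "string" argument only selects which _MSK/_LSH pair applies;
 * the actual bit twiddling is done by the common hnae3 helper.
 */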
@@ -76,33 +76,25 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-	ring->desc = kzalloc(size, GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+					 size, &ring->desc_dma_addr,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
-	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
-					     size, DMA_BIDIRECTIONAL);
-
-	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
-		ring->desc_dma_addr = 0;
-		kfree(ring->desc);
-		ring->desc = NULL;
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
-	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
-			 ring->desc_num * sizeof(ring->desc[0]),
-			 hclgevf_ring_to_dma_dir(ring));
+	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-	ring->desc_dma_addr = 0;
-	kfree(ring->desc);
-	ring->desc = NULL;
+	if (ring->desc) {
+		dma_free_coherent(cmq_ring_to_dev(ring), size,
+				  ring->desc, ring->desc_dma_addr);
+		ring->desc = NULL;
+	}
 }
 
 static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
 				  struct hclgevf_cmq_ring *ring)
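This is the one hunk in the merge that changes behavior rather than spelling: the VF command-queue descriptors move from kzalloc() plus dma_map_single() to a single coherent allocation, which removes the dma_mapping_error() unwind path entirely. A condensed sketch of the resulting pairing (demo_* names are placeholders; dma_zalloc_coherent() was the API of that kernel era, since folded into dma_alloc_coherent()):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative ring holding the two fields the hunk touches. */
struct demo_ring {
	void *desc;			/* CPU address of descriptor memory */
	dma_addr_t desc_dma_addr;	/* matching device address */
};

/* One coherent, zeroed allocation replaces kzalloc() + dma_map_single();
 * the mapping lives for the buffer's whole lifetime, so no
 * dma_mapping_error() check is needed. */
static int demo_alloc_desc(struct device *dev, struct demo_ring *ring,
			   size_t size)
{
	ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
					 GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

/* The free side mirrors the allocation with a single call. */
static void demo_free_desc(struct device *dev, struct demo_ring *ring,
			   size_t size)
{
	if (ring->desc) {
		dma_free_coherent(dev, size, ring->desc,
				  ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

Coherent memory also spares the driver explicit dma_sync_*() calls on a ring the hardware and CPU both touch constantly, which is a reasonable trade for a small descriptor array.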
@@ -450,11 +450,11 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
 
 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
-		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
 			     (tc_valid[i] & 0x1));
-		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
 			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
-		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
 			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
 	}
 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
@@ -582,9 +582,9 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
 		}
 
 		req->msg[idx_offset] =
-			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
 		req->msg[idx_offset + 1] = node->tqp_index;
-		req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
+		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
 							  HNAE3_RING_GL_IDX_M,
 							  HNAE3_RING_GL_IDX_S);
 
@@ -1000,7 +1000,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 
 	/* wait to check the hardware reset completion status */
 	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-	while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
 	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
 		msleep(HCLGEVF_RESET_WAIT_MS);
 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
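hclgevf_reset_wait() is a bounded poll: re-read the function-reset status register until the busy bit clears or the retry budget runs out. The shape, written generically (constants and the read_status() callback are stand-ins, not driver symbols):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/bitops.h>

#define DEMO_BUSY_B	0	/* hypothetical "reset in progress" bit */
#define DEMO_WAIT_CNT	5	/* hypothetical retry budget */
#define DEMO_WAIT_MS	500	/* hypothetical per-retry sleep */

/* Keep re-reading a status word until the busy bit clears or the
 * retries are exhausted, then report the outcome. */
static int demo_wait_reset_done(u32 (*read_status)(void))
{
	u32 val = read_status();
	int cnt = 0;

	while ((val & BIT(DEMO_BUSY_B)) && cnt++ < DEMO_WAIT_CNT) {
		msleep(DEMO_WAIT_MS);
		val = read_status();
	}

	return (val & BIT(DEMO_BUSY_B)) ? -EBUSY : 0;
}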
@@ -1959,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 
 static struct hnae3_ae_algo ae_algovf = {
 	.ops = &hclgevf_ops,
 	.name = HCLGEVF_NAME,
 	.pdev_id_table = ae_algovf_pci_tbl,
 };
-
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-		if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
 			dev_warn(&hdev->pdev->dev,
 				 "dropped invalid mailbox message, code = %d\n",
 				 req->msg[0]);