Merge branch 'hns3-next' into net-next
Huazhong Tan says:

====================
This patch-set includes some new features for the HNS3 ethernet
controller driver.

[patch 01/06] adds support for configuring VF link status on the host.

[patch 02/06] adds support for configuring VF spoof check.

[patch 03/06] adds support for configuring VF trust.

[patch 04/06] adds support for configuring VF bandwidth on the host.

[patch 05/06] adds support for configuring VF MAC on the host.

[patch 06/06] adds support for tx-scatter-gather-fraglist.
====================

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
commit 48423dd7e6
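The five host-side VF patches plug into the kernel's standard rtnetlink VF hooks (ndo_get_vf_config, ndo_set_vf_link_state, ndo_set_vf_spoofchk, ndo_set_vf_trust, ndo_set_vf_rate, ndo_set_vf_mac), so they should be drivable with the generic iproute2 commands on the PF, e.g. `ip link set <pf> vf 0 state auto`, `... vf 0 spoofchk on`, `... vf 0 trust on`, `... vf 0 max_tx_rate <Mbps>`, `... vf 0 mac <addr>`, and `ip link show <pf>` to read the settings back (the command forms are stock iproute2 usage, not taken from this series).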
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 	HCLGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
 	HCLGE_MBX_GET_MEDIA_TYPE,	/* (VF -> PF) get media type */
+	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
 	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
@@ -364,6 +364,19 @@ struct hnae3_ae_dev {
  *   Enable/disable HW GRO
  * add_arfs_entry
  *   Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ *   Get the VF configuration setting by the host
+ * set_vf_link_state
+ *   Set VF link status
+ * set_vf_spoofchk
+ *   Enable/disable spoof check for specified vf
+ * set_vf_trust
+ *   Enable/disable trust for specified vf, if the vf being trusted, then
+ *   it can enable promisc mode
+ * set_vf_rate
+ *   Set the max tx rate of specified vf.
+ * set_vf_mac
+ *   Configure the default MAC for specified VF
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -529,6 +542,16 @@ struct hnae3_ae_ops {
 	int (*mac_connect_phy)(struct hnae3_handle *handle);
 	void (*mac_disconnect_phy)(struct hnae3_handle *handle);
 	void (*restore_vlan_table)(struct hnae3_handle *handle);
+	int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+			     struct ifla_vf_info *ivf);
+	int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+				 int link_state);
+	int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+			       bool enable);
+	int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+	int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+			   int min_tx_rate, int max_tx_rate, bool force);
+	int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
 };
 
 struct hnae3_dcb_ops {
@@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 		return 0;
 
 	ret = skb_cow_head(skb, 0);
-	if (unlikely(ret))
+	if (unlikely(ret < 0))
 		return ret;
 
 	l3.hdr = skb_network_header(skb);
@@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 	return 0;
 }
 
-static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
-{
-	/* Config bd buffer end */
-	if (!!frag_end)
-		hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
-	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
-}
-
 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
 			     struct sk_buff *skb)
 {
@@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 	skb_reset_mac_len(skb);
 
 	ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
-	if (unlikely(ret)) {
+	if (unlikely(ret < 0)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.tx_l4_proto_err++;
 		u64_stats_update_end(&ring->syncp);
@@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 	ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
 			      &type_cs_vlan_tso,
 			      &ol_type_vlan_len_msec);
-	if (unlikely(ret)) {
+	if (unlikely(ret < 0)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.tx_l2l3l4_err++;
 		u64_stats_update_end(&ring->syncp);
@@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 
 	ret = hns3_set_tso(skb, &paylen, &mss,
 			   &type_cs_vlan_tso);
-	if (unlikely(ret)) {
+	if (unlikely(ret < 0)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.tx_tso_err++;
 		u64_stats_update_end(&ring->syncp);
@@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 }
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-			  unsigned int size, int frag_end,
-			  enum hns_desc_type type)
+			  unsigned int size, enum hns_desc_type type)
 {
+#define HNS3_LIKELY_BD_NUM	1
+
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
 	struct device *dev = ring_to_dev(ring);
@@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		int ret;
 
 		ret = hns3_fill_skb_desc(ring, skb, desc);
-		if (unlikely(ret))
+		if (unlikely(ret < 0))
 			return ret;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	desc_cb->length = size;
 
 	if (likely(size <= HNS3_MAX_BD_SIZE)) {
-		u16 bdtp_fe_sc_vld_ra_ri = 0;
-
 		desc_cb->priv = priv;
 		desc_cb->dma = dma;
 		desc_cb->type = type;
 		desc->addr = cpu_to_le64(dma);
 		desc->tx.send_size = cpu_to_le16(size);
-		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
-			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+			cpu_to_le16(BIT(HNS3_TXD_VLD_B));
 
 		ring_ptr_move_fw(ring, next_to_use);
-		return 0;
+		return HNS3_LIKELY_BD_NUM;
 	}
 
 	frag_buf_num = hns3_tx_bd_count(size);
@@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
 	/* When frag size is bigger than hardware limit, split this frag */
 	for (k = 0; k < frag_buf_num; k++) {
-		u16 bdtp_fe_sc_vld_ra_ri = 0;
-
 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
 		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
 				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
-		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
-				       frag_end && (k == frag_buf_num - 1) ?
-				       1 : 0);
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
-			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+			cpu_to_le16(BIT(HNS3_TXD_VLD_B));
 
 		/* move ring pointer to next */
 		ring_ptr_move_fw(ring, next_to_use);
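Net effect of the hunks above: hns3_fill_desc() now returns the number of BDs it consumed on success (HNS3_LIKELY_BD_NUM, i.e. 1, in the common single-BD case, or frag_buf_num when an oversized buffer is split) instead of 0, every BD is stamped with only the VLD bit at fill time, and hns3_set_txbd_baseinfo() is deleted; the FE (frag end) bit is now set exactly once, on the packet's last BD, in hns3_nic_net_xmit() further below.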
@@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc = &ring->desc[ring->next_to_use];
 	}
 
-	return 0;
+	return frag_buf_num;
 }
 
-static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+				    unsigned int bd_num)
 {
-	unsigned int bd_num;
+	unsigned int size;
 	int i;
 
-	/* if the total len is within the max bd limit */
-	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
-		return skb_shinfo(skb)->nr_frags + 1;
-
-	bd_num = hns3_tx_bd_count(skb_headlen(skb));
+	size = skb_headlen(skb);
+	while (size > HNS3_MAX_BD_SIZE) {
+		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+		size -= HNS3_MAX_BD_SIZE;
+
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
+
+	if (size) {
+		bd_size[bd_num++] = size;
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		bd_num += hns3_tx_bd_count(skb_frag_size(frag));
+		size = skb_frag_size(frag);
+		if (!size)
+			continue;
+
+		while (size > HNS3_MAX_BD_SIZE) {
+			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+			size -= HNS3_MAX_BD_SIZE;
+
+			if (bd_num > HNS3_MAX_TSO_BD_NUM)
+				return bd_num;
+		}
+
+		bd_size[bd_num++] = size;
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
+
+	return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+{
+	struct sk_buff *frag_skb;
+	unsigned int bd_num = 0;
+
+	/* If the total len is within the max bd limit */
+	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+		   skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+		return skb_shinfo(skb)->nr_frags + 1U;
+
+	/* The below case will always be linearized, return
+	 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
+	 */
+	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+		     (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+		return HNS3_MAX_TSO_BD_NUM + 1U;
+
+	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+		return bd_num;
+
+	skb_walk_frags(skb, frag_skb) {
+		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
 	}
 
 	return bd_num;
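The BD accounting above follows one rule: a single descriptor maps at most HNS3_MAX_BD_SIZE (65535) bytes, so a buffer of len bytes needs DIV_ROUND_UP(len, 65535) BDs, and hns3_skb_bd_num() additionally records each piece's size in bd_size[] for the linearization check below. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

#define HNS3_MAX_BD_SIZE 65535U

/* Same result as the driver's hns3_tx_bd_count(): ceil(len / 65535) */
static unsigned int bd_count(unsigned int len)
{
	return (len + HNS3_MAX_BD_SIZE - 1U) / HNS3_MAX_BD_SIZE;
}

int main(void)
{
	/* A 150000-byte linear area splits into 65535 + 65535 + 18930,
	 * so it consumes three BDs; bd_size[] would hold those sizes.
	 */
	printf("%u\n", bd_count(150000U)); /* prints 3 */
	return 0;
}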
@@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
  * 7 frags to to be larger than gso header len + mss, and the remaining
  * continuous 7 frags to be larger than MSS except the last 7 frags.
  */
-static bool hns3_skb_need_linearized(struct sk_buff *skb)
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+				     unsigned int bd_num)
 {
-	int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
 	unsigned int tot_len = 0;
 	int i;
 
-	for (i = 0; i < bd_limit; i++)
-		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+	for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+		tot_len += bd_size[i];
 
-	/* ensure headlen + the first 7 frags is greater than mss + header
-	 * and the first 7 frags is greater than mss.
-	 */
-	if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
-	    hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+	/* ensure the first 8 frags is greater than mss + header */
+	if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
 		return true;
 
-	/* ensure the remaining continuous 7 buffer is greater than mss */
-	for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
-		tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+	/* ensure every continuous 7 buffer is greater than mss
+	 * except the last one.
+	 */
+	for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+		tot_len -= bd_size[i];
+		tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
 
 		if (tot_len < skb_shinfo(skb)->gso_size)
 			return true;
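Restated, the test now runs over the precomputed bd_size[] array: the first 8 BDs together must cover at least one MSS plus the GSO header, and every later window of 7 consecutive BDs must cover at least one MSS. A self-contained sketch under those assumptions (mss and hdr_len stand in for skb_shinfo(skb)->gso_size and hns3_gso_hdr_len(skb); illustrative, not driver code):

#include <stdbool.h>

#define HNS3_MAX_NON_TSO_BD_NUM 8U

/* Expects bd_num > HNS3_MAX_NON_TSO_BD_NUM, as guaranteed by the caller. */
static bool need_linearized(const unsigned int *bd_size, unsigned int bd_num,
			    unsigned int mss, unsigned int hdr_len)
{
	unsigned int tot_len = 0;
	unsigned int i;

	for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
		tot_len += bd_size[i];

	/* window 0: the first 8 BDs must also cover the protocol headers */
	if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] < mss + hdr_len)
		return true;

	/* slide: each later 7-BD window must cover at least one MSS */
	for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
		if (tot_len < mss)
			return true;
	}

	return false;
}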
@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb)
|
||||||
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
|
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
|
||||||
struct sk_buff **out_skb)
|
struct sk_buff **out_skb)
|
||||||
{
|
{
|
||||||
|
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
|
||||||
struct sk_buff *skb = *out_skb;
|
struct sk_buff *skb = *out_skb;
|
||||||
unsigned int bd_num;
|
unsigned int bd_num;
|
||||||
|
|
||||||
bd_num = hns3_nic_bd_num(skb);
|
bd_num = hns3_tx_bd_num(skb, bd_size);
|
||||||
if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
|
if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
|
||||||
struct sk_buff *new_skb;
|
struct sk_buff *new_skb;
|
||||||
|
|
||||||
if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
|
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
|
||||||
!hns3_skb_need_linearized(skb))
|
!hns3_skb_need_linearized(skb, bd_size, bd_num))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/* manual split the send packet */
|
/* manual split the send packet */
|
||||||
|
@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
*out_skb = new_skb;
|
*out_skb = new_skb;
|
||||||
|
|
||||||
bd_num = hns3_nic_bd_num(new_skb);
|
bd_num = hns3_tx_bd_count(new_skb->len);
|
||||||
if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
|
if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
|
||||||
(!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
|
(!skb_is_gso(new_skb) &&
|
||||||
|
bd_num > HNS3_MAX_NON_TSO_BD_NUM))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
u64_stats_update_begin(&ring->syncp);
|
u64_stats_update_begin(&ring->syncp);
|
||||||
|
@ -1314,6 +1356,37 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
|
||||||
|
struct sk_buff *skb, enum hns_desc_type type)
|
||||||
|
{
|
||||||
|
unsigned int size = skb_headlen(skb);
|
||||||
|
int i, ret, bd_num = 0;
|
||||||
|
|
||||||
|
if (size) {
|
||||||
|
ret = hns3_fill_desc(ring, skb, size, type);
|
||||||
|
if (unlikely(ret < 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
bd_num += ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||||
|
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||||
|
|
||||||
|
size = skb_frag_size(frag);
|
||||||
|
if (!size)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
|
||||||
|
if (unlikely(ret < 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
bd_num += ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
return bd_num;
|
||||||
|
}
|
||||||
|
|
||||||
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||||
{
|
{
|
||||||
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||||
|
@ -1321,58 +1394,54 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||||
&tx_ring_data(priv, skb->queue_mapping);
|
&tx_ring_data(priv, skb->queue_mapping);
|
||||||
struct hns3_enet_ring *ring = ring_data->ring;
|
struct hns3_enet_ring *ring = ring_data->ring;
|
||||||
struct netdev_queue *dev_queue;
|
struct netdev_queue *dev_queue;
|
||||||
skb_frag_t *frag;
|
int pre_ntu, next_to_use_head;
|
||||||
int next_to_use_head;
|
struct sk_buff *frag_skb;
|
||||||
int buf_num;
|
int bd_num = 0;
|
||||||
int seg_num;
|
|
||||||
int size;
|
|
||||||
int ret;
|
int ret;
|
||||||
int i;
|
|
||||||
|
|
||||||
/* Prefetch the data used later */
|
/* Prefetch the data used later */
|
||||||
prefetch(skb->data);
|
prefetch(skb->data);
|
||||||
|
|
||||||
buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
|
ret = hns3_nic_maybe_stop_tx(ring, &skb);
|
||||||
if (unlikely(buf_num <= 0)) {
|
if (unlikely(ret <= 0)) {
|
||||||
if (buf_num == -EBUSY) {
|
if (ret == -EBUSY) {
|
||||||
u64_stats_update_begin(&ring->syncp);
|
u64_stats_update_begin(&ring->syncp);
|
||||||
ring->stats.tx_busy++;
|
ring->stats.tx_busy++;
|
||||||
u64_stats_update_end(&ring->syncp);
|
u64_stats_update_end(&ring->syncp);
|
||||||
goto out_net_tx_busy;
|
goto out_net_tx_busy;
|
||||||
} else if (buf_num == -ENOMEM) {
|
} else if (ret == -ENOMEM) {
|
||||||
u64_stats_update_begin(&ring->syncp);
|
u64_stats_update_begin(&ring->syncp);
|
||||||
ring->stats.sw_err_cnt++;
|
ring->stats.sw_err_cnt++;
|
||||||
u64_stats_update_end(&ring->syncp);
|
u64_stats_update_end(&ring->syncp);
|
||||||
}
|
}
|
||||||
|
|
||||||
hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
|
hns3_rl_err(netdev, "xmit error: %d!\n", ret);
|
||||||
goto out_err_tx_ok;
|
goto out_err_tx_ok;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* No. of segments (plus a header) */
|
|
||||||
seg_num = skb_shinfo(skb)->nr_frags + 1;
|
|
||||||
/* Fill the first part */
|
|
||||||
size = skb_headlen(skb);
|
|
||||||
|
|
||||||
next_to_use_head = ring->next_to_use;
|
next_to_use_head = ring->next_to_use;
|
||||||
|
|
||||||
ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
|
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
|
||||||
DESC_TYPE_SKB);
|
if (unlikely(ret < 0))
|
||||||
if (unlikely(ret))
|
|
||||||
goto fill_err;
|
goto fill_err;
|
||||||
|
|
||||||
/* Fill the fragments */
|
bd_num += ret;
|
||||||
for (i = 1; i < seg_num; i++) {
|
|
||||||
frag = &skb_shinfo(skb)->frags[i - 1];
|
|
||||||
size = skb_frag_size(frag);
|
|
||||||
|
|
||||||
ret = hns3_fill_desc(ring, frag, size,
|
if (!skb_has_frag_list(skb))
|
||||||
seg_num - 1 == i ? 1 : 0,
|
goto out;
|
||||||
DESC_TYPE_PAGE);
|
|
||||||
|
|
||||||
if (unlikely(ret))
|
skb_walk_frags(skb, frag_skb) {
|
||||||
|
ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
|
||||||
|
if (unlikely(ret < 0))
|
||||||
goto fill_err;
|
goto fill_err;
|
||||||
|
|
||||||
|
bd_num += ret;
|
||||||
}
|
}
|
||||||
|
out:
|
||||||
|
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
|
||||||
|
(ring->desc_num - 1);
|
||||||
|
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
|
||||||
|
cpu_to_le16(BIT(HNS3_TXD_FE_B));
|
||||||
|
|
||||||
/* Complete translate all packets */
|
/* Complete translate all packets */
|
||||||
dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
|
dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
|
||||||
|
@ -1380,7 +1449,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||||
|
|
||||||
wmb(); /* Commit all data before submit */
|
wmb(); /* Commit all data before submit */
|
||||||
|
|
||||||
hnae3_queue_xmit(ring->tqp, buf_num);
|
hnae3_queue_xmit(ring->tqp, bd_num);
|
||||||
|
|
||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
|
|
||||||
|
@ -1413,6 +1482,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* For VF device, if there is a perm_addr, then the user will not
|
||||||
|
* be allowed to change the address.
|
||||||
|
*/
|
||||||
|
if (!hns3_is_phys_func(h->pdev) &&
|
||||||
|
!is_zero_ether_addr(netdev->perm_addr)) {
|
||||||
|
netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
|
||||||
|
netdev->perm_addr, mac_addr->sa_data);
|
||||||
|
return -EPERM;
|
||||||
|
}
|
||||||
|
|
||||||
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
|
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
|
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
|
||||||
|
@ -1643,6 +1722,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *handle = hns3_get_handle(netdev);
|
||||||
|
|
||||||
|
if (hns3_nic_resetting(netdev))
|
||||||
|
return -EBUSY;
|
||||||
|
|
||||||
|
if (!handle->ae_algo->ops->set_vf_spoofchk)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *handle = hns3_get_handle(netdev);
|
||||||
|
|
||||||
|
if (!handle->ae_algo->ops->set_vf_trust)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
|
||||||
|
}
|
||||||
|
|
||||||
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
|
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
|
||||||
{
|
{
|
||||||
struct hnae3_handle *h = hns3_get_handle(netdev);
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
||||||
|
@ -1805,6 +1907,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
|
||||||
|
struct ifla_vf_info *ivf)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *h = hns3_get_handle(ndev);
|
||||||
|
|
||||||
|
if (!h->ae_algo->ops->get_vf_config)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return h->ae_algo->ops->get_vf_config(h, vf, ivf);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
|
||||||
|
int link_state)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *h = hns3_get_handle(ndev);
|
||||||
|
|
||||||
|
if (!h->ae_algo->ops->set_vf_link_state)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
|
||||||
|
int min_tx_rate, int max_tx_rate)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *h = hns3_get_handle(ndev);
|
||||||
|
|
||||||
|
if (!h->ae_algo->ops->set_vf_rate)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
|
||||||
|
false);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
||||||
|
|
||||||
|
if (!h->ae_algo->ops->set_vf_mac)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
if (is_multicast_ether_addr(mac)) {
|
||||||
|
netdev_err(netdev,
|
||||||
|
"Invalid MAC:%pM specified. Could not set MAC\n",
|
||||||
|
mac);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
|
||||||
|
}
|
||||||
|
|
||||||
static const struct net_device_ops hns3_nic_netdev_ops = {
|
static const struct net_device_ops hns3_nic_netdev_ops = {
|
||||||
.ndo_open = hns3_nic_net_open,
|
.ndo_open = hns3_nic_net_open,
|
||||||
.ndo_stop = hns3_nic_net_stop,
|
.ndo_stop = hns3_nic_net_stop,
|
||||||
|
@ -1820,10 +1973,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
|
||||||
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
|
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
|
||||||
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
|
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
|
||||||
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
|
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
|
||||||
|
.ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
|
||||||
|
.ndo_set_vf_trust = hns3_set_vf_trust,
|
||||||
#ifdef CONFIG_RFS_ACCEL
|
#ifdef CONFIG_RFS_ACCEL
|
||||||
.ndo_rx_flow_steer = hns3_rx_flow_steer,
|
.ndo_rx_flow_steer = hns3_rx_flow_steer,
|
||||||
#endif
|
#endif
|
||||||
|
.ndo_get_vf_config = hns3_nic_get_vf_config,
|
||||||
|
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
|
||||||
|
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
|
||||||
|
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
|
||||||
};
|
};
|
||||||
|
|
||||||
bool hns3_is_phys_func(struct pci_dev *pdev)
|
bool hns3_is_phys_func(struct pci_dev *pdev)
|
||||||
|
@@ -2069,9 +2227,8 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
-
-	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
 
 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
@@ -2081,21 +2238,24 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	netdev->vlan_features |=
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	if (pdev->revision >= 0x21) {
 		netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2358,7 +2518,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
 	if (unlikely(pkts && netif_carrier_ok(netdev) &&
-		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -3743,23 +3903,24 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 }
 
 /* Set mac addr if it is configured. or leave it to the AE driver */
-static int hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
 	u8 mac_addr_temp[ETH_ALEN];
 	int ret = 0;
 
-	if (h->ae_algo->ops->get_mac_addr && init) {
+	if (h->ae_algo->ops->get_mac_addr)
 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
-		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
-	}
 
 	/* Check if the MAC address is valid, if not get a random one */
-	if (!is_valid_ether_addr(netdev->dev_addr)) {
+	if (!is_valid_ether_addr(mac_addr_temp)) {
 		eth_hw_addr_random(netdev);
 		dev_warn(priv->dev, "using random MAC address %pM\n",
 			 netdev->dev_addr);
+	} else {
+		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
 	}
 
 	if (h->ae_algo->ops->set_mac_addr)
@@ -3863,7 +4024,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	handle->kinfo.netdev = netdev;
 	handle->priv = (void *)priv;
 
-	hns3_init_mac_addr(netdev, true);
+	hns3_init_mac_addr(netdev);
 
 	hns3_set_default_feature(netdev);
@@ -4331,7 +4492,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
 	bool vlan_filter_enable;
 	int ret;
 
-	ret = hns3_init_mac_addr(netdev, false);
+	ret = hns3_init_mac_addr(netdev);
 	if (ret)
 		return ret;
@@ -76,7 +76,7 @@ enum hns3_nic_state {
 #define HNS3_RING_NAME_LEN		16
 #define HNS3_BUFFER_SIZE_2048		2048
 #define HNS3_RING_MAX_PENDING		32760
-#define HNS3_RING_MIN_PENDING		24
+#define HNS3_RING_MIN_PENDING		72
 #define HNS3_RING_BD_MULTIPLE		8
 /* max frame size of mac */
 #define HNS3_MAC_MAX_FRAME		9728
@@ -195,9 +195,13 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED		1
 
 #define HNS3_MAX_BD_SIZE		65535
-#define HNS3_MAX_BD_NUM_NORMAL		8
-#define HNS3_MAX_BD_NUM_TSO		63
-#define HNS3_MAX_BD_PER_PKT		MAX_SKB_FRAGS
+#define HNS3_MAX_NON_TSO_BD_NUM		8U
+#define HNS3_MAX_TSO_BD_NUM		63U
+#define HNS3_MAX_TSO_SIZE \
+	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+
+#define HNS3_MAX_NON_TSO_SIZE \
+	(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
 
 #define HNS3_VECTOR_GL0_OFFSET		0x100
 #define HNS3_VECTOR_GL1_OFFSET		0x200
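Plugging in the numbers (plain arithmetic on the macros above, not part of the patch): HNS3_MAX_TSO_SIZE works out to 65535 * 63 = 4128705 bytes and HNS3_MAX_NON_TSO_SIZE to 65535 * 8 = 524280 bytes; hns3_tx_bd_num() sends anything larger down the linearization path. A compile-time sketch:

/* Illustrative only: re-derives the limits from the macro definitions. */
#define HNS3_MAX_BD_SIZE	65535U
#define HNS3_MAX_NON_TSO_BD_NUM	8U
#define HNS3_MAX_TSO_BD_NUM	63U

_Static_assert(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM == 4128705U,
	       "TSO skbs may map at most ~4 MB of data");
_Static_assert(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM == 524280U,
	       "non-TSO skbs may map at most ~512 KB of data");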
@@ -244,7 +244,7 @@ enum hclge_opcode_type {
 	/* QCN commands */
 	HCLGE_OPC_QCN_MOD_CFG		= 0x1A01,
 	HCLGE_OPC_QCN_GRP_TMPLT_CFG	= 0x1A02,
-	HCLGE_OPC_QCN_SHAPPING_IR_CFG	= 0x1A03,
+	HCLGE_OPC_QCN_SHAPPING_CFG	= 0x1A03,
 	HCLGE_OPC_QCN_SHAPPING_BS_CFG	= 0x1A04,
 	HCLGE_OPC_QCN_QSET_LINK_CFG	= 0x1A05,
 	HCLGE_OPC_QCN_RP_STATUS_GET	= 0x1A06,
@@ -1090,9 +1090,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
 			       enum hclge_opcode_type opcode, bool is_read);
 void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
 
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
-			       struct hclge_promisc_param *param);
-
 enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
 					   struct hclge_desc *desc);
 enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
@@ -1110,6 +1110,82 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
 	}
 }
 
+static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+{
+	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+	struct hclge_desc desc;
+	u32 shapping_para;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+
+	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+	shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"qs%u failed to get tx_rate, ret=%d\n",
+			qsid, ret);
+		return;
+	}
+
+	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+	ir_b = hclge_tm_get_field(shapping_para, IR_B);
+	ir_u = hclge_tm_get_field(shapping_para, IR_U);
+	ir_s = hclge_tm_get_field(shapping_para, IR_S);
+	bs_b = hclge_tm_get_field(shapping_para, BS_B);
+	bs_s = hclge_tm_get_field(shapping_para, BS_S);
+
+	dev_info(&hdev->pdev->dev,
+		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
+		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+}
+
+static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+{
+	struct hnae3_knic_private_info *kinfo;
+	struct hclge_vport *vport;
+	int vport_id, i;
+
+	for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
+		vport = &hdev->vport[vport_id];
+		kinfo = &vport->nic.kinfo;
+
+		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+
+		for (i = 0; i < kinfo->num_tc; i++) {
+			u16 qsid = vport->qs_offset + i;
+
+			hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+		}
+	}
+}
+
+static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
+				     const char *cmd_buf)
+{
+#define HCLGE_MAX_QSET_NUM 1024
+
+	u16 qsid;
+	int ret;
+
+	ret = kstrtou16(cmd_buf, 0, &qsid);
+	if (ret) {
+		hclge_dbg_dump_qs_shaper_all(hdev);
+		return;
+	}
+
+	if (qsid >= HCLGE_MAX_QSET_NUM) {
+		dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
+			qsid);
+		return;
+	}
+
+	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+}
+
 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 {
 #define DUMP_REG "dump reg"
@@ -1145,6 +1221,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 					 &cmd_buf[sizeof("dump ncl_config")]);
 	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
 		hclge_dbg_dump_mac_tnl_status(hdev);
+	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
+		hclge_dbg_dump_qs_shaper(hdev,
+					 &cmd_buf[sizeof("dump qs shaper")]);
 	} else {
 		dev_info(&hdev->pdev->dev, "unknown command\n");
 		return -EINVAL;
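Usage follows from the parser above: writing `dump qs shaper` to the hclge debugfs command interface dumps the qset shaper parameters of every vport, while `dump qs shaper <qsid>` with qsid in 0-1023 dumps a single qset.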
@@ -55,6 +55,8 @@
 
 #define HCLGE_LINK_STATUS_MS	10
 
+#define HCLGE_VF_VPORT_START_NUM	1
+
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -1182,6 +1184,35 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
 		hclge_parse_backplane_link_mode(hdev, speed_ability);
 }
 
+static u32 hclge_get_max_speed(u8 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		return HCLGE_MAC_SPEED_100G;
+
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		return HCLGE_MAC_SPEED_50G;
+
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		return HCLGE_MAC_SPEED_40G;
+
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		return HCLGE_MAC_SPEED_25G;
+
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		return HCLGE_MAC_SPEED_10G;
+
+	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+		return HCLGE_MAC_SPEED_1G;
+
+	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+		return HCLGE_MAC_SPEED_100M;
+
+	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+		return HCLGE_MAC_SPEED_10M;
+
+	return HCLGE_MAC_SPEED_1G;
+}
+
 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 {
 	struct hclge_cfg_param_cmd *req;
@@ -1352,6 +1383,8 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hclge_parse_link_mode(hdev, cfg.speed_ability);
 
+	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
 	    (hdev->tc_max < 1)) {
 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
@@ -1633,6 +1666,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 	for (i = 0; i < num_vport; i++) {
 		vport->back = hdev;
 		vport->vport_id = i;
+		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
@@ -2853,6 +2887,62 @@ static int hclge_get_status(struct hnae3_handle *handle)
 	return hdev->hw.mac.link;
 }
 
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+	if (pci_num_vf(hdev->pdev) == 0) {
+		dev_err(&hdev->pdev->dev,
+			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
+		return NULL;
+	}
+
+	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+		dev_err(&hdev->pdev->dev,
+			"vf id(%d) is out of range(0 <= vfid < %d)\n",
+			vf, pci_num_vf(hdev->pdev));
+		return NULL;
+	}
+
+	/* VF start from 1 in vport */
+	vf += HCLGE_VF_VPORT_START_NUM;
+	return &hdev->vport[vf];
+}
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+			       struct ifla_vf_info *ivf)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	ivf->vf = vf;
+	ivf->linkstate = vport->vf_info.link_state;
+	ivf->spoofchk = vport->vf_info.spoofchk;
+	ivf->trusted = vport->vf_info.trusted;
+	ivf->min_tx_rate = 0;
+	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+	ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+	return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+				   int link_state)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	vport->vf_info.link_state = link_state;
+
+	return 0;
+}
+
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
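Note the indexing convention hclge_get_vf_vport() encodes: hdev->vport[0] is the PF itself, so the zero-based VF id passed down from rtnetlink maps to hdev->vport[vf + HCLGE_VF_VPORT_START_NUM]; the set_vf_* handlers below all funnel through this helper and its range checks.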
@@ -4558,8 +4648,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
 	return ret;
 }
 
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
 			       struct hclge_promisc_param *param)
 {
 	struct hclge_promisc_cfg_cmd *req;
 	struct hclge_desc desc;
@@ -4586,8 +4676,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
 	return ret;
 }
 
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
-			      bool en_mc, bool en_bc, int vport_id)
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+				     bool en_uc, bool en_mc, bool en_bc,
+				     int vport_id)
 {
 	if (!param)
 		return;
@@ -4602,12 +4693,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
 	param->vf_id = vport_id;
 }
 
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+				 bool en_mc_pmc, bool en_bc_pmc)
+{
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_promisc_param param;
+
+	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+				 vport->vport_id);
+	return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
 				  bool en_mc_pmc)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-	struct hclge_promisc_param param;
 	bool en_bc_pmc = true;
 
 	/* For revision 0x20, if broadcast promisc enabled, vlan filter is
@@ -4617,9 +4717,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
 	if (handle->pdev->revision == 0x20)
 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
 
-	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
-				 vport->vport_id);
-	return hclge_cmd_set_promisc_mode(hdev, &param);
+	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+					    en_bc_pmc);
 }
 
 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -7391,6 +7490,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
 	return return_status;
 }
 
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+				     u8 *mac_addr)
+{
+	struct hclge_mac_vlan_tbl_entry_cmd req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	u16 egress_port = 0;
+	int i;
+
+	if (is_zero_ether_addr(mac_addr))
+		return false;
+
+	memset(&req, 0, sizeof(req));
+	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+	req.egress_port = cpu_to_le16(egress_port);
+	hclge_prepare_mac_addr(&req, mac_addr, false);
+
+	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+		return true;
+
+	vf_idx += HCLGE_VF_VPORT_START_NUM;
+	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+		if (i != vf_idx &&
+		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+			return true;
+
+	return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+			    u8 *mac_addr)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+		dev_info(&hdev->pdev->dev,
+			 "Specified MAC(=%pM) is same as before, no change committed!\n",
+			 mac_addr);
+		return 0;
+	}
+
+	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+			mac_addr);
+		return -EEXIST;
+	}
+
+	ether_addr_copy(vport->vf_info.mac, mac_addr);
+	dev_info(&hdev->pdev->dev,
+		 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+		 vf, mac_addr);
+
+	return hclge_inform_reset_assert_to_vf(vport);
+}
+
 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
 {
@@ -7564,6 +7724,8 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
 				    __be16 proto)
 {
 #define HCLGE_MAX_VF_BYTES  16
+
+	struct hclge_vport *vport = &hdev->vport[vfid];
 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
 	struct hclge_desc desc[2];
@@ -7572,10 +7734,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
 	int ret;
 
 	/* if vf vlan table is full, firmware will close vf vlan filter, it
-	 * is unable and unnecessary to add new vlan id to vf vlan filter
+	 * is unable and unnecessary to add new vlan id to vf vlan filter.
+	 * If spoof check is enable, and vf vlan is full, it shouldn't add
+	 * new vlan, because tx packets with these vlan id will be dropped.
 	 */
-	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+		if (vport->vf_info.spoofchk && vlan) {
+			dev_err(&hdev->pdev->dev,
+				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
+			return -EPERM;
+		}
 		return 0;
+	}
 
 	hclge_cmd_setup_basic_desc(&desc[0],
 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -8072,12 +8242,15 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
 	}
 
 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
-		if (vlan->hd_tbl_status)
-			hclge_set_vlan_filter_hw(hdev,
-						 htons(ETH_P_8021Q),
-						 vport->vport_id,
-						 vlan->vlan_id,
-						 false);
+		int ret;
+
+		if (!vlan->hd_tbl_status)
+			continue;
+		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+					       vport->vport_id,
+					       vlan->vlan_id, false);
+		if (ret)
+			break;
 	}
 }
 
@@ -9319,6 +9492,219 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
 }
 
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+	return hclge_config_switch_param(hdev, vf, enable,
+					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+					  HCLGE_FILTER_FE_NIC_INGRESS_B,
+					  enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+	int ret;
+
+	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Set vf %d mac spoof check %s failed, ret=%d\n",
+			vf, enable ? "on" : "off", ret);
+		return ret;
+	}
+
+	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Set vf %d vlan spoof check %s failed, ret=%d\n",
+			vf, enable ? "on" : "off", ret);
+
+	return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+				 bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u32 new_spoofchk = enable ? 1 : 0;
+	int ret;
+
+	if (hdev->pdev->revision == 0x20)
+		return -EOPNOTSUPP;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	if (vport->vf_info.spoofchk == new_spoofchk)
+		return 0;
+
+	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+		dev_warn(&hdev->pdev->dev,
+			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
+			 vf);
+	else if (enable && hclge_is_umv_space_full(vport))
+		dev_warn(&hdev->pdev->dev,
+			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
+			 vf);
+
+	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+	if (ret)
+		return ret;
+
+	vport->vf_info.spoofchk = new_spoofchk;
+	return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int ret;
+	int i;
+
+	if (hdev->pdev->revision == 0x20)
+		return 0;
+
+	/* resume the vf spoof check state after reset */
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+					       vport->vf_info.spoofchk);
+		if (ret)
+			return ret;
+
+		vport++;
+	}
+
+	return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u32 new_trusted = enable ? 1 : 0;
+	bool en_bc_pmc;
+	int ret;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	if (vport->vf_info.trusted == new_trusted)
+		return 0;
+
+	/* Disable promisc mode for VF if it is not trusted any more. */
+	if (!enable && vport->vf_info.promisc_enable) {
+		en_bc_pmc = hdev->pdev->revision != 0x20;
+		ret = hclge_set_vport_promisc_mode(vport, false, false,
+						   en_bc_pmc);
+		if (ret)
+			return ret;
+		vport->vf_info.promisc_enable = 0;
+		hclge_inform_vf_promisc_info(vport);
+	}
+
+	vport->vf_info.trusted = new_trusted;
+
+	return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+	int ret;
+	int vf;
+
+	/* reset vf rate to default value */
+	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+		struct hclge_vport *vport = &hdev->vport[vf];
+
+		vport->vf_info.max_tx_rate = 0;
+		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+		if (ret)
+			dev_err(&hdev->pdev->dev,
+				"vf%d failed to reset to default, ret=%d\n",
+				vf - HCLGE_VF_VPORT_START_NUM, ret);
+	}
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+				     int min_tx_rate, int max_tx_rate)
+{
+	if (min_tx_rate != 0 ||
+	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+		dev_err(&hdev->pdev->dev,
+			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+			     int min_tx_rate, int max_tx_rate, bool force)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+	if (ret)
+		return ret;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+		return 0;
+
+	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
vport->vf_info.max_tx_rate = max_tx_rate;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
|
||||||
|
{
|
||||||
|
struct hnae3_handle *handle = &hdev->vport->nic;
|
||||||
|
struct hclge_vport *vport;
|
||||||
|
int ret;
|
||||||
|
int vf;
|
||||||
|
|
||||||
|
/* resume the vf max_tx_rate after reset */
|
||||||
|
for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
|
||||||
|
vport = hclge_get_vf_vport(hdev, vf);
|
||||||
|
if (!vport)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* zero means max rate, after reset, firmware already set it to
|
||||||
|
* max rate, so just continue.
|
||||||
|
*/
|
||||||
|
if (!vport->vf_info.max_tx_rate)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ret = hclge_set_vf_rate(handle, vf, 0,
|
||||||
|
vport->vf_info.max_tx_rate, true);
|
||||||
|
if (ret) {
|
||||||
|
dev_err(&hdev->pdev->dev,
|
||||||
|
"vf%d failed to resume tx_rate:%u, ret=%d\n",
|
||||||
|
vf, vport->vf_info.max_tx_rate, ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void hclge_reset_vport_state(struct hclge_dev *hdev)
|
static void hclge_reset_vport_state(struct hclge_dev *hdev)
|
||||||
{
|
{
|
||||||
struct hclge_vport *vport = hdev->vport;
|
struct hclge_vport *vport = hdev->vport;
|
||||||
|
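These new PF-side handlers are only reachable through the hnae3_ae_ops table wired up further down in this file. The net_device_ops glue that forwards the standard ndo_set_vf_* callbacks into that table lives in hns3_enet.c and is not part of the hunks shown here; a minimal sketch of what such a wrapper looks like (the wrapper name and body are assumptions, not taken from this diff):

	/* Sketch only: forwards an ndo callback into the hnae3 ops table,
	 * the way the hns3 enet layer typically dispatches these hooks.
	 */
	static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf,
					bool enable)
	{
		struct hnae3_handle *h = hns3_get_handle(netdev);

		if (!h->ae_algo->ops->set_vf_spoofchk)
			return -EOPNOTSUPP;

		return h->ae_algo->ops->set_vf_spoofchk(h, vf, enable);
	}

From the host, these paths are exercised with the usual iproute2 commands on the PF, e.g. `ip link set <pf-dev> vf 0 spoofchk on`, `ip link set <pf-dev> vf 0 trust on` or `ip link set <pf-dev> vf 0 max_tx_rate 100`.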
@@ -9418,6 +9804,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 	}
 
 	hclge_reset_vport_state(hdev);
+	ret = hclge_reset_vport_spoofchk(hdev);
+	if (ret)
+		return ret;
+
+	ret = hclge_resume_vf_rate(hdev);
+	if (ret)
+		return ret;
 
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);

@@ -9430,6 +9823,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hclge_dev *hdev = ae_dev->priv;
 	struct hclge_mac *mac = &hdev->hw.mac;
 
+	hclge_reset_vf_rate(hdev);
 	hclge_misc_affinity_teardown(hdev);
 	hclge_state_uninit(hdev);

@@ -10152,6 +10546,12 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.mac_connect_phy = hclge_mac_connect_phy,
 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
 	.restore_vlan_table = hclge_restore_vlan_table,
+	.get_vf_config = hclge_get_vf_config,
+	.set_vf_link_state = hclge_set_vf_link_state,
+	.set_vf_spoofchk = hclge_set_vf_spoofchk,
+	.set_vf_trust = hclge_set_vf_trust,
+	.set_vf_rate = hclge_set_vf_rate,
+	.set_vf_mac = hclge_set_vf_mac,
 };
 
 static struct hnae3_ae_algo ae_algo = {

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h

@@ -258,6 +258,7 @@ struct hclge_mac {
 	u8 support_autoneg;
 	u8 speed_type;	/* 0: sfp speed, 1: active speed */
 	u32 speed;
+	u32 max_speed;
 	u32 speed_ability; /* speed ability supported by current media */
 	u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
 	u32 fec_mode; /* active fec mode */

@@ -885,6 +886,15 @@ struct hclge_port_base_vlan_config {
 	struct hclge_vlan_info vlan_info;
 };
 
+struct hclge_vf_info {
+	int link_state;
+	u8 mac[ETH_ALEN];
+	u32 spoofchk;
+	u32 max_tx_rate;
+	u32 trusted;
+	u16 promisc_enable;
+};
+
 struct hclge_vport {
 	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

@@ -916,15 +926,15 @@ struct hclge_vport {
 	unsigned long state;
 	unsigned long last_active_jiffies;
 	u32 mps; /* Max packet size */
+	struct hclge_vf_info vf_info;
 
 	struct list_head uc_mac_list;   /* Store VF unicast table */
 	struct list_head mc_mac_list;   /* Store VF multicast table */
 	struct list_head vlan_list;     /* Store VF vlan table */
 };
 
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
-			      bool en_mc, bool en_bc, int vport_id);
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+				 bool en_mc_pmc, bool en_bc_pmc);
 
 int hclge_add_uc_addr_common(struct hclge_vport *vport,
 			     const unsigned char *addr);
 int hclge_rm_uc_addr_common(struct hclge_vport *vport,

@@ -993,4 +1003,5 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
 				struct hclge_desc *desc);
 void hclge_report_hw_error(struct hclge_dev *hdev,
 			   enum hnae3_hw_error_type type);
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
 #endif

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c

@@ -205,12 +205,38 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
 static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *req)
 {
-	bool en_bc = req->msg[1] ? true : false;
-	struct hclge_promisc_param param;
+#define HCLGE_MBX_BC_INDEX	1
+#define HCLGE_MBX_UC_INDEX	2
+#define HCLGE_MBX_MC_INDEX	3
+
+	bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
+	bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
+	bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+	int ret;
 
-	/* vf is not allowed to enable unicast/multicast broadcast */
-	hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
-	return hclge_cmd_set_promisc_mode(vport->back, &param);
+	if (!vport->vf_info.trusted) {
+		en_uc = false;
+		en_mc = false;
+	}
+
+	ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
+	if (req->mbx_need_resp)
+		hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
+
+	vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+
+	return ret;
+}
+
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+{
+	u8 dest_vfid = (u8)vport->vport_id;
+	u8 msg_data[2];
+
+	memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+
+	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+			   HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
 }
 
 static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
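The reworked hclge_set_vf_promisc_mode() now consumes three flags from the VF's HCLGE_MBX_SET_PROMISC_MODE request instead of one, and silently strips unicast/multicast promisc for untrusted VFs. Restating the message layout implied by the indices above (this struct is illustration only; it does not exist in the driver):

	struct promisc_mbx_req_layout {
		u8 opcode;	/* msg[0]: HCLGE_MBX_SET_PROMISC_MODE */
		u8 en_bc;	/* msg[1]: broadcast promisc flag */
		u8 en_uc;	/* msg[2]: unicast promisc, honoured only for trusted VFs */
		u8 en_mc;	/* msg[3]: multicast promisc, honoured only for trusted VFs */
	};

hclge_inform_vf_promisc_info() is the reverse direction: when trust is revoked and promisc is switched off (see hclge_set_vf_trust() above), the PF pushes the new promisc_enable value so the VF can log that its promisc mode was closed by the host.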
@@ -223,6 +249,20 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
 	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
 		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
 
+		/* If VF MAC has been configured by the host then it
+		 * cannot be overridden by the MAC specified by the VM.
+		 */
+		if (!is_zero_ether_addr(vport->vf_info.mac) &&
+		    !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+			status = -EPERM;
+			goto out;
+		}
+
+		if (!is_valid_ether_addr(mac_addr)) {
+			status = -EINVAL;
+			goto out;
+		}
+
 		hclge_rm_uc_addr_common(vport, old_addr);
 		status = hclge_add_uc_addr_common(vport, mac_addr);
 		if (status) {

@@ -250,6 +290,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
 		return -EIO;
 	}
 
+out:
 	if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
 		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

@@ -324,6 +365,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
 		proto = msg_cmd->proto;
 		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
 					       vlan, is_kill);
+		if (mbx_req->mbx_need_resp)
+			return hclge_gen_resp_to_vf(vport, mbx_req, status,
+						    NULL, 0);
 	} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
 		struct hnae3_handle *handle = &vport->nic;
 		bool en = msg_cmd->is_kill ? true : false;

@@ -398,6 +442,13 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
 				    HCLGE_TQPS_RSS_INFO_LEN);
 }
 
+static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
+				    ETH_ALEN);
+}
+
 static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
 				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 				    bool gen_resp)

@@ -428,6 +479,9 @@ static int hclge_get_vf_media_type(struct hclge_vport *vport,
 static int hclge_get_link_info(struct hclge_vport *vport,
 			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
+#define HCLGE_VF_LINK_STATE_UP		1U
+#define HCLGE_VF_LINK_STATE_DOWN	0U
+
 	struct hclge_dev *hdev = vport->back;
 	u16 link_status;
 	u8 msg_data[8];

@@ -435,7 +489,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 	u16 duplex;
 
 	/* mac.link can only be 0 or 1 */
-	link_status = (u16)hdev->hw.mac.link;
+	switch (vport->vf_info.link_state) {
+	case IFLA_VF_LINK_STATE_ENABLE:
+		link_status = HCLGE_VF_LINK_STATE_UP;
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		link_status = HCLGE_VF_LINK_STATE_DOWN;
+		break;
+	case IFLA_VF_LINK_STATE_AUTO:
+	default:
+		link_status = (u16)hdev->hw.mac.link;
+		break;
+	}
+
 	duplex = hdev->hw.mac.duplex;
 	memcpy(&msg_data[0], &link_status, sizeof(u16));
 	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
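The switch keys on the standard link-state values that ndo_set_vf_link_state passes down. For reference, the enumerators defined in include/uapi/linux/if_link.h (comments paraphrased):

	enum {
		IFLA_VF_LINK_STATE_AUTO,	/* VF link follows the PF's physical link */
		IFLA_VF_LINK_STATE_ENABLE,	/* VF link always reported up */
		IFLA_VF_LINK_STATE_DISABLE,	/* VF link always reported down */
		__IFLA_VF_LINK_STATE_MAX,
	};

They are selected from the host with `ip link set <pf-dev> vf 0 state auto|enable|disable`.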
@@ -749,6 +815,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		case HCLGE_MBX_PUSH_LINK_STATUS:
 			hclge_handle_link_change_event(hdev, req);
 			break;
+		case HCLGE_MBX_GET_MAC_ADDR:
+			ret = hclge_get_vf_mac_addr(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to get MAC for VF\n",
+					ret);
+			break;
 		case HCLGE_MBX_NCSI_ERROR:
 			hclge_handle_ncsi_error(hdev);
 			break;

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c

@@ -511,6 +511,49 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	u8 ir_b, ir_u, ir_s;
+	u32 shaper_para;
+	int ret, i;
+
+	if (!max_tx_rate)
+		max_tx_rate = HCLGE_ETHER_MAX_RATE;
+
+	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+				     &ir_b, &ir_u, &ir_s);
+	if (ret)
+		return ret;
+
+	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+						 HCLGE_SHAPER_BS_U_DEF,
+						 HCLGE_SHAPER_BS_S_DEF);
+
+	for (i = 0; i < kinfo->num_tc; i++) {
+		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+					   false);
+
+		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"vf%d, qs%u failed to set tx_rate:%d, ret=%d\n",
+				vport->vport_id, shap_cfg_cmd->qs_id,
+				max_tx_rate, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
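hclge_tm_qs_shaper_cfg() programs one qset shaper per traffic class of the VF's vport, so the configured max_tx_rate caps every TC the VF owns. Rates are in Mbit/s (they are validated against hdev->hw.mac.max_speed on the PF side), and a zero rate falls back to HCLGE_ETHER_MAX_RATE, the unlimited default; that is why hclge_reset_vf_rate() earlier in this series clears vf_info.max_tx_rate and re-runs this helper. Illustrative calls, mirroring the PF code above:

	ret = hclge_tm_qs_shaper_cfg(vport, 1000);	/* cap the VF at 1 Gbit/s */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);		/* restore the unlimited default */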
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h

@@ -96,6 +96,12 @@ struct hclge_pg_shapping_cmd {
 	__le32 pg_shapping_para;
 };
 
+struct hclge_qs_shapping_cmd {
+	__le16 qs_id;
+	u8 rsvd[2];
+	__le32 qs_shapping_para;
+};
+
 #define HCLGE_BP_GRP_NUM		32
 #define HCLGE_BP_SUB_GRP_ID_S		0
 #define HCLGE_BP_SUB_GRP_ID_M		GENMASK(4, 0)

@@ -154,4 +160,6 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+
 #endif

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

@@ -1105,6 +1105,7 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
 }
 
 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+					bool en_uc_pmc, bool en_mc_pmc,
 					bool en_bc_pmc)
 {
 	struct hclge_mbx_vf_to_pf_cmd *req;

@@ -1112,10 +1113,11 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
 	int ret;
 
 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
 
 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
 	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
 	req->msg[1] = en_bc_pmc ? 1 : 0;
+	req->msg[2] = en_uc_pmc ? 1 : 0;
+	req->msg[3] = en_mc_pmc ? 1 : 0;
 
 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)

@@ -1125,9 +1127,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
 	return ret;
 }
 
-static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+				    bool en_mc_pmc)
 {
-	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct pci_dev *pdev = hdev->pdev;
+	bool en_bc_pmc;
+
+	en_bc_pmc = pdev->revision != 0x20;
+
+	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+					    en_bc_pmc);
 }
 
 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,

@@ -1166,11 +1176,37 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
 	}
 }
 
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+	u8 host_mac[ETH_ALEN];
+	int status;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
+				      true, host_mac, ETH_ALEN);
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"fail to get VF MAC from host %d", status);
+		return status;
+	}
+
+	ether_addr_copy(p, host_mac);
+
+	return 0;
+}
+
 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 host_mac_addr[ETH_ALEN];
 
-	ether_addr_copy(p, hdev->hw.mac.mac_addr);
+	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+		return;
+
+	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+	if (hdev->has_pf_mac)
+		ether_addr_copy(p, host_mac_addr);
+	else
+		ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }
 
 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
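Taken together, these hunks let a host-assigned MAC take precedence on the VF: hclgevf_get_mac_addr() first asks the PF via HCLGE_MBX_GET_MAC_ADDR and only falls back to the VF's locally stored address when the host has not configured one (has_pf_mac clear), while on the PF side hclge_set_vf_uc_mac_addr() returns -EPERM if the VM tries to override a host-pinned address. Host-side assignment uses `ip link set <pf-dev> vf 0 mac <addr>`; the PF implementation then typically asserts a VF reset so the VF re-reads the address, though that path is outside the hunks shown here.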
@@ -1267,7 +1303,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	memcpy(&msg_data[3], &proto, sizeof(proto));
 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
 				   HCLGE_MBX_VLAN_FILTER, msg_data,
-				   HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+				   HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
 
 	/* when remove hw vlan filter failed, record the vlan id,
 	 * and try to remove it from hw later, to be consistence

@@ -2626,12 +2662,6 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
-	if (pdev->revision >= 0x21) {
-		ret = hclgevf_set_promisc_mode(hdev, true);
-		if (ret)
-			return ret;
-	}
-
 	dev_info(&hdev->pdev->dev, "Reset done\n");
 
 	return 0;

@@ -2706,17 +2736,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	if (ret)
 		goto err_config;
 
-	/* vf is not allowed to enable unicast/multicast promisc mode.
-	 * For revision 0x20, default to disable broadcast promisc mode,
-	 * firmware makes sure broadcast packets can be accepted.
-	 * For revision 0x21, default to enable broadcast promisc mode.
-	 */
-	if (pdev->revision >= 0x21) {
-		ret = hclgevf_set_promisc_mode(hdev, true);
-		if (ret)
-			goto err_config;
-	}
-
 	/* Initialize RSS for this VF */
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {

@@ -3130,6 +3149,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.get_global_queue_id = hclgevf_get_qid_global,
 	.set_timer_task = hclgevf_set_timer_task,
 	.get_link_mode = hclgevf_get_link_mode,
+	.set_promisc_mode = hclgevf_set_promisc_mode,
 };
 
 static struct hnae3_ae_algo ae_algovf = {

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h

@@ -266,6 +266,7 @@ struct hclgevf_dev {
 	u16 num_tx_desc;	/* desc num of per tx queue */
 	u16 num_rx_desc;	/* desc num of per rx queue */
 	u8 hw_tc_map;
+	u8 has_pf_mac;
 
 	u16 num_msi;
 	u16 num_msi_left;

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c

@@ -205,6 +205,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		case HCLGE_MBX_ASSERTING_RESET:
 		case HCLGE_MBX_LINK_STAT_MODE:
 		case HCLGE_MBX_PUSH_VLAN_INFO:
+		case HCLGE_MBX_PUSH_PROMISC_INFO:
 			/* set this mbx event as pending. This is required as we
 			 * might loose interrupt event when mbx task is busy
 			 * handling. This shall be cleared when mbx task just

@@ -248,6 +249,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 			  crq->next_to_use);
 }
 
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+				       u16 promisc_info)
+{
+	if (!promisc_info)
+		dev_info(&hdev->pdev->dev,
+			 "Promisc mode is closed by host for being untrusted.\n");
+}
+
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
 	enum hnae3_reset_type reset_type;

@@ -313,6 +322,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			hclgevf_update_port_base_vlan_info(hdev, state,
 							   (u8 *)vlan_info, 8);
 			break;
+		case HCLGE_MBX_PUSH_PROMISC_INFO:
+			hclgevf_parse_promisc_info(hdev, msg_q[1]);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"fetched unsupported(%d) message from arq\n",