Merge branch 'hns3-fixes'
Salil Mehta says:

====================
Fixes, cleanups & minor additions to HNS3 driver

This patch set presents some fixes and cleanups for the HNS3 PF and VF drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1e6a4bc874

@@ -29,7 +29,7 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
     return false;
 }
 
-static void hnae3_set_client_init_flag(struct hnae3_client *client,
+void hnae3_set_client_init_flag(struct hnae3_client *client,
                                        struct hnae3_ae_dev *ae_dev, int inited)
 {
     switch (client->type) {
@@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
         break;
     }
 }
+EXPORT_SYMBOL(hnae3_set_client_init_flag);
 
 static int hnae3_get_client_init_flag(struct hnae3_client *client,
                                       struct hnae3_ae_dev *ae_dev)
@@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
     /* now, (un-)instantiate client by calling lower layer */
     if (is_reg) {
         ret = ae_dev->ops->init_client_instance(client, ae_dev);
-        if (ret) {
+        if (ret)
             dev_err(&ae_dev->pdev->dev,
                 "fail to instantiate client, ret = %d\n", ret);
-            return ret;
-        }
 
-        hnae3_set_client_init_flag(client, ae_dev, 1);
-        return 0;
+        return ret;
     }
 
     if (hnae3_get_client_init_flag(client, ae_dev)) {

@@ -337,6 +337,8 @@ struct hnae3_ae_ops {
     void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
     int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
                         bool is_first);
+    int (*do_ioctl)(struct hnae3_handle *handle,
+                    struct ifreq *ifr, int cmd);
     int (*add_uc_addr)(struct hnae3_handle *handle,
                        const unsigned char *addr);
     int (*rm_uc_addr)(struct hnae3_handle *handle,
@@ -521,4 +523,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
 
 void hnae3_unregister_client(struct hnae3_client *client);
 int hnae3_register_client(struct hnae3_client *client);
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+                                struct hnae3_ae_dev *ae_dev, int inited);
 #endif

@@ -66,6 +66,23 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
     return IRQ_HANDLED;
 }
 
+/* This callback function is used to set affinity changes to the irq affinity
+ * masks when the irq_set_affinity_notifier function is used.
+ */
+static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
+                                         const cpumask_t *mask)
+{
+    struct hns3_enet_tqp_vector *tqp_vectors =
+        container_of(notify, struct hns3_enet_tqp_vector,
+                     affinity_notify);
+
+    tqp_vectors->affinity_mask = *mask;
+}
+
+static void hns3_nic_irq_affinity_release(struct kref *ref)
+{
+}
+
 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
 {
     struct hns3_enet_tqp_vector *tqp_vectors;
@@ -77,6 +94,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
         if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
             continue;
 
+        /* clear the affinity notifier and affinity mask */
+        irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+        irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
+
         /* release the irq resource */
         free_irq(tqp_vectors->vector_irq, tqp_vectors);
         tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -127,6 +148,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
             return ret;
         }
 
+        tqp_vectors->affinity_notify.notify =
+                        hns3_nic_irq_affinity_notify;
+        tqp_vectors->affinity_notify.release =
+                        hns3_nic_irq_affinity_release;
+        irq_set_affinity_notifier(tqp_vectors->vector_irq,
+                                  &tqp_vectors->affinity_notify);
+        irq_set_affinity_hint(tqp_vectors->vector_irq,
+                              &tqp_vectors->affinity_mask);
+
         tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
     }
 
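For context, the hunks above follow the generic genirq affinity-notifier pattern: a driver embeds a struct irq_affinity_notify in its per-vector state, fills in .notify/.release, and registers it together with an affinity hint. A minimal sketch of that wiring, outside the HNS3 code and with hypothetical names such as my_vector, might look like this:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical per-vector context; mirrors the tqp_vector usage above. */
struct my_vector {
    int irq;
    cpumask_t affinity_mask;
    struct irq_affinity_notify affinity_notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
                               const cpumask_t *mask)
{
    struct my_vector *v = container_of(notify, struct my_vector,
                                       affinity_notify);

    /* Cache the new affinity so later work can follow the IRQ's CPU. */
    v->affinity_mask = *mask;
}

static void my_affinity_release(struct kref *ref)
{
    /* Nothing dynamically allocated here, so nothing to free. */
}

static void my_vector_setup_affinity(struct my_vector *v)
{
    v->affinity_notify.notify = my_affinity_notify;
    v->affinity_notify.release = my_affinity_release;
    irq_set_affinity_notifier(v->irq, &v->affinity_notify);
    irq_set_affinity_hint(v->irq, &v->affinity_mask);
}

On teardown both registrations are cleared by passing NULL, which is exactly what the hns3_nic_uninit_irq() hunk above does.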
@@ -1044,7 +1074,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
     /* No. of segments (plus a header) */
     buf_num = skb_shinfo(skb)->nr_frags + 1;
 
-    if (buf_num > ring_space(ring))
+    if (unlikely(ring_space(ring) < buf_num))
         return -EBUSY;
 
     *bnum = buf_num;
@@ -1209,6 +1239,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
     return 0;
 }
 
+static int hns3_nic_do_ioctl(struct net_device *netdev,
+                             struct ifreq *ifr, int cmd)
+{
+    struct hnae3_handle *h = hns3_get_handle(netdev);
+
+    if (!netif_running(netdev))
+        return -EINVAL;
+
+    if (!h->ae_algo->ops->do_ioctl)
+        return -EOPNOTSUPP;
+
+    return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
+}
+
 static int hns3_nic_set_features(struct net_device *netdev,
                                  netdev_features_t features)
 {
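Adding .ndo_do_ioctl means the standard MII ioctls issued from user space can now reach the PHY through this driver. As a rough illustration (not part of the patch, and the interface name "eth0" is only an example), a user-space program could query the PHY address like this:

/* Minimal sketch: query the PHY address via SIOCGMIIPHY. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/mii.h>

int main(void)
{
    struct ifreq ifr;
    struct mii_ioctl_data *mii;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

    if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
        perror("SIOCGMIIPHY");
        close(fd);
        return 1;
    }

    mii = (struct mii_ioctl_data *)&ifr.ifr_data;
    printf("PHY id: %u\n", mii->phy_id);
    close(fd);
    return 0;
}

In the kernel, such a request lands in hns3_nic_do_ioctl() above and is forwarded through the new do_ioctl op, which the PF side implements with phy_mii_ioctl() in the hclge_main.c hunks further down.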
@@ -1535,6 +1579,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
     .ndo_start_xmit = hns3_nic_net_xmit,
     .ndo_tx_timeout = hns3_nic_net_timeout,
     .ndo_set_mac_address = hns3_nic_net_set_mac_address,
+    .ndo_do_ioctl = hns3_nic_do_ioctl,
     .ndo_change_mtu = hns3_nic_change_mtu,
     .ndo_set_features = hns3_nic_set_features,
     .ndo_get_stats64 = hns3_nic_get_stats64,
@@ -1925,7 +1970,7 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
     return u > c ? (h > c && h <= u) : (h > c || h <= u);
 }
 
-bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 {
     struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
     struct netdev_queue *dev_queue;
@@ -1936,7 +1981,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
     rmb(); /* Make sure head is ready before touch any data */
 
     if (is_ring_empty(ring) || head == ring->next_to_clean)
-        return true; /* no data to poll */
+        return; /* no data to poll */
 
     if (unlikely(!is_valid_clean_head(ring, head))) {
         netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
@@ -1945,16 +1990,15 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
         u64_stats_update_begin(&ring->syncp);
         ring->stats.io_err_cnt++;
         u64_stats_update_end(&ring->syncp);
-        return true;
+        return;
     }
 
     bytes = 0;
     pkts = 0;
-    while (head != ring->next_to_clean && budget) {
+    while (head != ring->next_to_clean) {
         hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
         /* Issue prefetch for next Tx descriptor */
         prefetch(&ring->desc_cb[ring->next_to_clean]);
-        budget--;
     }
 
     ring->tqp_vector->tx_group.total_bytes += bytes;
@@ -1979,8 +2023,6 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
             ring->stats.restart_queue++;
         }
     }
-
-    return !!budget;
 }
 
 static int hns3_desc_unused(struct hns3_enet_ring *ring)
@@ -2514,10 +2556,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
     /* Since the actual Tx work is minimal, we can give the Tx a larger
      * budget and be more aggressive about cleaning up the Tx descriptors.
      */
-    hns3_for_each_ring(ring, tqp_vector->tx_group) {
-        if (!hns3_clean_tx_ring(ring, budget))
-            clean_complete = false;
-    }
+    hns3_for_each_ring(ring, tqp_vector->tx_group)
+        hns3_clean_tx_ring(ring);
 
     /* make sure rx ring budget not smaller than 1 */
     rx_budget = max(budget / tqp_vector->num_tqps, 1);
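After this change the Tx side is drained unconditionally and only the Rx rings consume the NAPI budget. A schematic poll handler with that shape, using hypothetical my_* helpers rather than the driver's own, would be:

#include <linux/netdevice.h>

struct my_vector {
    struct napi_struct napi;
    /* ring pointers etc. would live here */
};

static void my_clean_tx(struct my_vector *v) { /* reclaim completed Tx descriptors */ }
static int my_clean_rx(struct my_vector *v, int budget) { return 0; /* stub */ }

static int my_poll(struct napi_struct *napi, int budget)
{
    struct my_vector *v = container_of(napi, struct my_vector, napi);
    int work_done;

    my_clean_tx(v);                     /* Tx reclaim ignores the budget */
    work_done = my_clean_rx(v, budget); /* Rx work is bounded by it */

    if (work_done < budget)
        napi_complete_done(napi, work_done);

    return work_done;
}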
@@ -2640,6 +2680,23 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
     group->count++;
 }
 
+static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+{
+    struct pci_dev *pdev = priv->ae_handle->pdev;
+    struct hns3_enet_tqp_vector *tqp_vector;
+    int num_vectors = priv->vector_num;
+    int numa_node;
+    int vector_i;
+
+    numa_node = dev_to_node(&pdev->dev);
+
+    for (vector_i = 0; vector_i < num_vectors; vector_i++) {
+        tqp_vector = &priv->tqp_vector[vector_i];
+        cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
+                        &tqp_vector->affinity_mask);
+    }
+}
+
 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 {
     struct hnae3_ring_chain_node vector_ring_chain;
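For reference, cpumask_local_spread(i, node) returns the i-th online CPU, preferring CPUs on the given NUMA node before falling back to the rest, so the loop above seeds each vector's affinity mask with a distinct node-local CPU where possible. A standalone sketch of the same spreading, with hypothetical names, is:

#include <linux/cpumask.h>
#include <linux/device.h>

/* Spread n vectors across CPUs of the device's NUMA node (sketch). */
static void my_spread_vectors(struct device *dev, cpumask_t *masks, int n)
{
    int node = dev_to_node(dev);
    int i;

    for (i = 0; i < n; i++)
        cpumask_set_cpu(cpumask_local_spread(i, node), &masks[i]);
}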
@@ -2648,6 +2705,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
     int ret = 0;
     u16 i;
 
+    hns3_nic_set_cpumask(priv);
+
     for (i = 0; i < priv->vector_num; i++) {
         tqp_vector = &priv->tqp_vector[i];
         hns3_vector_gl_rl_init_hw(tqp_vector, priv);

@@ -491,7 +491,9 @@ struct hns3_enet_tqp_vector {
     struct hns3_enet_ring_group rx_group;
     struct hns3_enet_ring_group tx_group;
 
+    cpumask_t affinity_mask;
     u16 num_tqps; /* total number of tqps in TQP vector */
+    struct irq_affinity_notify affinity_notify;
 
     char name[HNAE3_INT_NAME_LEN];
 
@@ -615,7 +617,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
 int hns3_set_channels(struct net_device *netdev,
                       struct ethtool_channels *ch);
 
-bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
 int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
 int hns3_nic_reset_all_ring(struct hnae3_handle *h);

@@ -200,7 +200,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
     for (i = start_ringid; i <= end_ringid; i++) {
         struct hns3_enet_ring *ring = priv->ring_data[i].ring;
 
-        hns3_clean_tx_ring(ring, budget);
+        hns3_clean_tx_ring(ring);
     }
 }
 

@@ -24,8 +24,6 @@
 #define HCLGE_NAME "hclge"
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
-#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
 
 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
                                      enum hclge_mta_dmac_sel_type mta_mac_sel,
@@ -56,170 +54,6 @@ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
     "Phy Loopback test"
 };
 
-static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
-    {"igu_rx_oversize_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
-    {"igu_rx_undersize_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
-    {"igu_rx_out_all_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
-    {"igu_rx_uni_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
-    {"igu_rx_multi_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
-    {"igu_rx_broad_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
-    {"egu_tx_out_all_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
-    {"egu_tx_uni_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
-    {"egu_tx_multi_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
-    {"egu_tx_broad_pkt",
-        HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
-    {"ssu_ppp_mac_key_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
-    {"ssu_ppp_host_key_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
-    {"ppp_ssu_mac_rlt_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
-    {"ppp_ssu_host_rlt_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
-    {"ssu_tx_in_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
-    {"ssu_tx_out_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
-    {"ssu_rx_in_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
-    {"ssu_rx_out_num",
-        HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
-};
-
-static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
-    {"igu_rx_err_pkt",
-        HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
-    {"igu_rx_no_eof_pkt",
-        HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
-    {"igu_rx_no_sof_pkt",
-        HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
-    {"egu_tx_1588_pkt",
-        HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
-    {"ssu_full_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
-    {"ssu_part_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
-    {"ppp_key_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
-    {"ppp_rlt_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
-    {"ssu_key_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
-    {"pkt_curr_buf_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
-    {"qcn_fb_rcv_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
-    {"qcn_fb_drop_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
-    {"qcn_fb_invaild_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
-    {"rx_packet_tc0_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
-    {"rx_packet_tc1_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
-    {"rx_packet_tc2_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
-    {"rx_packet_tc3_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
-    {"rx_packet_tc4_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
-    {"rx_packet_tc5_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
-    {"rx_packet_tc6_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
-    {"rx_packet_tc7_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
-    {"rx_packet_tc0_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
-    {"rx_packet_tc1_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
-    {"rx_packet_tc2_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
-    {"rx_packet_tc3_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
-    {"rx_packet_tc4_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
-    {"rx_packet_tc5_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
-    {"rx_packet_tc6_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
-    {"rx_packet_tc7_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
-    {"tx_packet_tc0_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
-    {"tx_packet_tc1_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
-    {"tx_packet_tc2_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
-    {"tx_packet_tc3_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
-    {"tx_packet_tc4_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
-    {"tx_packet_tc5_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
-    {"tx_packet_tc6_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
-    {"tx_packet_tc7_in_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
-    {"tx_packet_tc0_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
-    {"tx_packet_tc1_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
-    {"tx_packet_tc2_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
-    {"tx_packet_tc3_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
-    {"tx_packet_tc4_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
-    {"tx_packet_tc5_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
-    {"tx_packet_tc6_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
-    {"tx_packet_tc7_out_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
-    {"pkt_curr_buf_tc0_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
-    {"pkt_curr_buf_tc1_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
-    {"pkt_curr_buf_tc2_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
-    {"pkt_curr_buf_tc3_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
-    {"pkt_curr_buf_tc4_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
-    {"pkt_curr_buf_tc5_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
-    {"pkt_curr_buf_tc6_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
-    {"pkt_curr_buf_tc7_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
-    {"mb_uncopy_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
-    {"lo_pri_unicast_rlt_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
-    {"hi_pri_multicast_rlt_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
-    {"lo_pri_multicast_rlt_drop_num",
-        HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
-    {"rx_oq_drop_pkt_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
-    {"tx_oq_drop_pkt_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
-    {"nic_l2_err_drop_pkt_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
-    {"roc_l2_err_drop_pkt_cnt",
-        HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
-};
-
 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
     {"mac_tx_mac_pause_num",
         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
@@ -394,109 +228,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
     },
 };
 
-static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_64_BIT_CMD_NUM 5
-#define HCLGE_64_BIT_RTN_DATANUM 4
-    u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
-    struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
-    __le64 *desc_data;
-    int i, k, n;
-    int ret;
-
-    hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
-    ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
-    if (ret) {
-        dev_err(&hdev->pdev->dev,
-            "Get 64 bit pkt stats fail, status = %d.\n", ret);
-        return ret;
-    }
-
-    for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
-        if (unlikely(i == 0)) {
-            desc_data = (__le64 *)(&desc[i].data[0]);
-            n = HCLGE_64_BIT_RTN_DATANUM - 1;
-        } else {
-            desc_data = (__le64 *)(&desc[i]);
-            n = HCLGE_64_BIT_RTN_DATANUM;
-        }
-        for (k = 0; k < n; k++) {
-            *data++ += le64_to_cpu(*desc_data);
-            desc_data++;
-        }
-    }
-
-    return 0;
-}
-
-static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
-{
-    stats->pkt_curr_buf_cnt = 0;
-    stats->pkt_curr_buf_tc0_cnt = 0;
-    stats->pkt_curr_buf_tc1_cnt = 0;
-    stats->pkt_curr_buf_tc2_cnt = 0;
-    stats->pkt_curr_buf_tc3_cnt = 0;
-    stats->pkt_curr_buf_tc4_cnt = 0;
-    stats->pkt_curr_buf_tc5_cnt = 0;
-    stats->pkt_curr_buf_tc6_cnt = 0;
-    stats->pkt_curr_buf_tc7_cnt = 0;
-}
-
-static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_32_BIT_CMD_NUM 8
-#define HCLGE_32_BIT_RTN_DATANUM 8
-
-    struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
-    struct hclge_32_bit_stats *all_32_bit_stats;
-    __le32 *desc_data;
-    int i, k, n;
-    u64 *data;
-    int ret;
-
-    all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
-    data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
-
-    hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
-    ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
-    if (ret) {
-        dev_err(&hdev->pdev->dev,
-            "Get 32 bit pkt stats fail, status = %d.\n", ret);
-
-        return ret;
-    }
-
-    hclge_reset_partial_32bit_counter(all_32_bit_stats);
-    for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
-        if (unlikely(i == 0)) {
-            __le16 *desc_data_16bit;
-
-            all_32_bit_stats->igu_rx_err_pkt +=
-                le32_to_cpu(desc[i].data[0]);
-
-            desc_data_16bit = (__le16 *)&desc[i].data[1];
-            all_32_bit_stats->igu_rx_no_eof_pkt +=
-                le16_to_cpu(*desc_data_16bit);
-
-            desc_data_16bit++;
-            all_32_bit_stats->igu_rx_no_sof_pkt +=
-                le16_to_cpu(*desc_data_16bit);
-
-            desc_data = &desc[i].data[2];
-            n = HCLGE_32_BIT_RTN_DATANUM - 4;
-        } else {
-            desc_data = (__le32 *)&desc[i];
-            n = HCLGE_32_BIT_RTN_DATANUM;
-        }
-        for (k = 0; k < n; k++) {
-            *data++ += le32_to_cpu(*desc_data);
-            desc_data++;
-        }
-    }
-
-    return 0;
-}
-
 static int hclge_mac_update_stats(struct hclge_dev *hdev)
 {
 #define HCLGE_MAC_CMD_NUM 21
@@ -675,14 +406,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
                                  struct net_device_stats *net_stats)
 {
     net_stats->tx_dropped = 0;
-    net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
-    net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
-    net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
-
     net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
     net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
-    net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
-    net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
     net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
 
     net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
@@ -717,12 +442,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
         dev_err(&hdev->pdev->dev,
             "Update MAC stats fail, status = %d.\n", status);
 
-    status = hclge_32_bit_update_stats(hdev);
-    if (status)
-        dev_err(&hdev->pdev->dev,
-            "Update 32 bit stats fail, status = %d.\n",
-            status);
-
     hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
 }
 
@@ -743,18 +462,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
             "Update MAC stats fail, status = %d.\n",
             status);
 
-    status = hclge_32_bit_update_stats(hdev);
-    if (status)
-        dev_err(&hdev->pdev->dev,
-            "Update 32 bit stats fail, status = %d.\n",
-            status);
-
-    status = hclge_64_bit_update_stats(hdev);
-    if (status)
-        dev_err(&hdev->pdev->dev,
-            "Update 64 bit stats fail, status = %d.\n",
-            status);
-
     status = hclge_tqps_update_stats(handle);
     if (status)
         dev_err(&hdev->pdev->dev,
@@ -793,8 +500,6 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
         handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
     } else if (stringset == ETH_SS_STATS) {
         count = ARRAY_SIZE(g_mac_stats_string) +
-            ARRAY_SIZE(g_all_32bit_stats_string) +
-            ARRAY_SIZE(g_all_64bit_stats_string) +
            hclge_tqps_get_sset_count(handle, stringset);
     }
 
@@ -814,16 +519,6 @@ static void hclge_get_strings(struct hnae3_handle *handle,
                    g_mac_stats_string,
                    size,
                    p);
-        size = ARRAY_SIZE(g_all_32bit_stats_string);
-        p = hclge_comm_get_strings(stringset,
-                   g_all_32bit_stats_string,
-                   size,
-                   p);
-        size = ARRAY_SIZE(g_all_64bit_stats_string);
-        p = hclge_comm_get_strings(stringset,
-                   g_all_64bit_stats_string,
-                   size,
-                   p);
         p = hclge_tqps_get_strings(handle, p);
     } else if (stringset == ETH_SS_TEST) {
         if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
@@ -857,14 +552,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
                  g_mac_stats_string,
                  ARRAY_SIZE(g_mac_stats_string),
                  data);
-    p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
-                 g_all_32bit_stats_string,
-                 ARRAY_SIZE(g_all_32bit_stats_string),
-                 p);
-    p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
-                 g_all_64bit_stats_string,
-                 ARRAY_SIZE(g_all_64bit_stats_string),
-                 p);
     p = hclge_tqps_get_stats(handle, p);
 }
 
@@ -2376,10 +2063,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
     int mac_state;
     int link_stat;
 
+    if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
+        return 0;
+
     mac_state = hclge_get_mac_link_status(hdev);
 
     if (hdev->hw.mac.phydev) {
-        if (!genphy_read_status(hdev->hw.mac.phydev))
+        if (hdev->hw.mac.phydev->state == PHY_RUNNING)
             link_stat = mac_state &
                     hdev->hw.mac.phydev->link;
         else
@@ -3832,6 +3522,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
     struct hclge_dev *hdev = vport->back;
     int i;
 
+    set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
     del_timer_sync(&hdev->service_timer);
     cancel_work_sync(&hdev->service_task);
     clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
@@ -4621,6 +4313,18 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
     return 0;
 }
 
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+                          int cmd)
+{
+    struct hclge_vport *vport = hclge_get_vport(handle);
+    struct hclge_dev *hdev = vport->back;
+
+    if (!hdev->hw.mac.phydev)
+        return -EOPNOTSUPP;
+
+    return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
                                       bool filter_en)
 {
@@ -5458,26 +5162,31 @@ static int hclge_init_client_instance(struct hnae3_client *client,
         vport->nic.client = client;
         ret = client->ops->init_instance(&vport->nic);
         if (ret)
-            return ret;
+            goto clear_nic;
 
         ret = hclge_init_instance_hw(hdev);
         if (ret) {
             client->ops->uninit_instance(&vport->nic,
                              0);
-            return ret;
+            goto clear_nic;
         }
 
+        hnae3_set_client_init_flag(client, ae_dev, 1);
+
         if (hdev->roce_client &&
             hnae3_dev_roce_supported(hdev)) {
             struct hnae3_client *rc = hdev->roce_client;
 
             ret = hclge_init_roce_base_info(vport);
             if (ret)
-                return ret;
+                goto clear_roce;
 
             ret = rc->ops->init_instance(&vport->roce);
             if (ret)
-                return ret;
+                goto clear_roce;
+
+            hnae3_set_client_init_flag(hdev->roce_client,
+                                       ae_dev, 1);
         }
 
         break;
@@ -5487,7 +5196,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 
         ret = client->ops->init_instance(&vport->nic);
         if (ret)
-            return ret;
+            goto clear_nic;
+
+        hnae3_set_client_init_flag(client, ae_dev, 1);
 
         break;
     case HNAE3_CLIENT_ROCE:
@@ -5499,16 +5210,27 @@ static int hclge_init_client_instance(struct hnae3_client *client,
         if (hdev->roce_client && hdev->nic_client) {
             ret = hclge_init_roce_base_info(vport);
             if (ret)
-                return ret;
+                goto clear_roce;
 
             ret = client->ops->init_instance(&vport->roce);
             if (ret)
-                return ret;
+                goto clear_roce;
+
+            hnae3_set_client_init_flag(client, ae_dev, 1);
         }
         }
     }
 
     return 0;
+
+clear_nic:
+    hdev->nic_client = NULL;
+    vport->nic.client = NULL;
+    return ret;
+clear_roce:
+    hdev->roce_client = NULL;
+    vport->roce.client = NULL;
+    return ret;
 }
 
 static void hclge_uninit_client_instance(struct hnae3_client *client,
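The conversion from early return ret; to goto clear_nic / goto clear_roce in the three hunks above follows the usual kernel unwind idiom: every failure path funnels through a label that undoes the client pointers that were set earlier in the function. Stripped of the driver specifics, the shape is roughly this (all names here are hypothetical, not the driver's):

struct my_client { int id; };

struct my_dev {
    struct my_client *nic_client;
    struct my_client *roce_client;
};

static int my_init_nic(struct my_dev *d)  { return 0; /* stub */ }
static int my_init_roce(struct my_dev *d) { return 0; /* stub */ }

/* Generic shape of the goto-based unwind used above. */
static int my_register_clients(struct my_dev *d, struct my_client *c)
{
    int ret;

    d->nic_client = c;
    ret = my_init_nic(d);
    if (ret)
        goto clear_nic;

    d->roce_client = c;
    ret = my_init_roce(d);
    if (ret)
        goto clear_roce;

    return 0;

clear_nic:
    d->nic_client = NULL;
    return ret;
clear_roce:
    d->roce_client = NULL;
    return ret;
}

As in the patch, each label clears only the state recorded for its own client type before returning the error.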
@@ -5528,7 +5250,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
         }
         if (client->type == HNAE3_CLIENT_ROCE)
             return;
-        if (client->ops->uninit_instance) {
+        if (hdev->nic_client && client->ops->uninit_instance) {
             hclge_uninit_instance_hw(hdev);
             client->ops->uninit_instance(&vport->nic, 0);
             hdev->nic_client = NULL;
@@ -6290,6 +6012,7 @@ static const struct hnae3_ae_ops hclge_ops = {
     .get_tc_size = hclge_get_tc_size,
     .get_mac_addr = hclge_get_mac_addr,
     .set_mac_addr = hclge_set_mac_addr,
+    .do_ioctl = hclge_do_ioctl,
     .add_uc_addr = hclge_add_uc_addr,
     .rm_uc_addr = hclge_rm_uc_addr,
     .add_mc_addr = hclge_add_mc_addr,

@@ -256,109 +256,6 @@ struct hclge_comm_stats_str {
     unsigned long offset;
 };
 
-/* all 64bit stats, opcode id: 0x0030 */
-struct hclge_64_bit_stats {
-    /* query_igu_stat */
-    u64 igu_rx_oversize_pkt;
-    u64 igu_rx_undersize_pkt;
-    u64 igu_rx_out_all_pkt;
-    u64 igu_rx_uni_pkt;
-    u64 igu_rx_multi_pkt;
-    u64 igu_rx_broad_pkt;
-    u64 rsv0;
-
-    /* query_egu_stat */
-    u64 egu_tx_out_all_pkt;
-    u64 egu_tx_uni_pkt;
-    u64 egu_tx_multi_pkt;
-    u64 egu_tx_broad_pkt;
-
-    /* ssu_ppp packet stats */
-    u64 ssu_ppp_mac_key_num;
-    u64 ssu_ppp_host_key_num;
-    u64 ppp_ssu_mac_rlt_num;
-    u64 ppp_ssu_host_rlt_num;
-
-    /* ssu_tx_in_out_dfx_stats */
-    u64 ssu_tx_in_num;
-    u64 ssu_tx_out_num;
-    /* ssu_rx_in_out_dfx_stats */
-    u64 ssu_rx_in_num;
-    u64 ssu_rx_out_num;
-};
-
-/* all 32bit stats, opcode id: 0x0031 */
-struct hclge_32_bit_stats {
-    u64 igu_rx_err_pkt;
-    u64 igu_rx_no_eof_pkt;
-    u64 igu_rx_no_sof_pkt;
-    u64 egu_tx_1588_pkt;
-    u64 egu_tx_err_pkt;
-    u64 ssu_full_drop_num;
-    u64 ssu_part_drop_num;
-    u64 ppp_key_drop_num;
-    u64 ppp_rlt_drop_num;
-    u64 ssu_key_drop_num;
-    u64 pkt_curr_buf_cnt;
-    u64 qcn_fb_rcv_cnt;
-    u64 qcn_fb_drop_cnt;
-    u64 qcn_fb_invaild_cnt;
-    u64 rsv0;
-    u64 rx_packet_tc0_in_cnt;
-    u64 rx_packet_tc1_in_cnt;
-    u64 rx_packet_tc2_in_cnt;
-    u64 rx_packet_tc3_in_cnt;
-    u64 rx_packet_tc4_in_cnt;
-    u64 rx_packet_tc5_in_cnt;
-    u64 rx_packet_tc6_in_cnt;
-    u64 rx_packet_tc7_in_cnt;
-    u64 rx_packet_tc0_out_cnt;
-    u64 rx_packet_tc1_out_cnt;
-    u64 rx_packet_tc2_out_cnt;
-    u64 rx_packet_tc3_out_cnt;
-    u64 rx_packet_tc4_out_cnt;
-    u64 rx_packet_tc5_out_cnt;
-    u64 rx_packet_tc6_out_cnt;
-    u64 rx_packet_tc7_out_cnt;
-
-    /* Tx packet level statistics */
-    u64 tx_packet_tc0_in_cnt;
-    u64 tx_packet_tc1_in_cnt;
-    u64 tx_packet_tc2_in_cnt;
-    u64 tx_packet_tc3_in_cnt;
-    u64 tx_packet_tc4_in_cnt;
-    u64 tx_packet_tc5_in_cnt;
-    u64 tx_packet_tc6_in_cnt;
-    u64 tx_packet_tc7_in_cnt;
-    u64 tx_packet_tc0_out_cnt;
-    u64 tx_packet_tc1_out_cnt;
-    u64 tx_packet_tc2_out_cnt;
-    u64 tx_packet_tc3_out_cnt;
-    u64 tx_packet_tc4_out_cnt;
-    u64 tx_packet_tc5_out_cnt;
-    u64 tx_packet_tc6_out_cnt;
-    u64 tx_packet_tc7_out_cnt;
-
-    /* packet buffer statistics */
-    u64 pkt_curr_buf_tc0_cnt;
-    u64 pkt_curr_buf_tc1_cnt;
-    u64 pkt_curr_buf_tc2_cnt;
-    u64 pkt_curr_buf_tc3_cnt;
-    u64 pkt_curr_buf_tc4_cnt;
-    u64 pkt_curr_buf_tc5_cnt;
-    u64 pkt_curr_buf_tc6_cnt;
-    u64 pkt_curr_buf_tc7_cnt;
-
-    u64 mb_uncopy_num;
-    u64 lo_pri_unicast_rlt_drop_num;
-    u64 hi_pri_multicast_rlt_drop_num;
-    u64 lo_pri_multicast_rlt_drop_num;
-    u64 rx_oq_drop_pkt_cnt;
-    u64 tx_oq_drop_pkt_cnt;
-    u64 nic_l2_err_drop_pkt_cnt;
-    u64 roc_l2_err_drop_pkt_cnt;
-};
-
 /* mac stats ,opcode id: 0x0032 */
 struct hclge_mac_stats {
     u64 mac_tx_mac_pause_num;
@@ -450,8 +347,6 @@ struct hclge_mac_stats {
 #define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
 struct hclge_hw_stats {
     struct hclge_mac_stats mac_stats;
-    struct hclge_64_bit_stats all_64_bit_stats;
-    struct hclge_32_bit_stats all_32_bit_stats;
     u32 stats_timer;
 };
 

@@ -297,7 +297,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
 }
 
 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
-                                    u8 q_id, u16 qs_id)
+                                    u16 q_id, u16 qs_id)
 {
     struct hclge_nq_to_qs_link_cmd *map;
     struct hclge_desc desc;

@@ -132,8 +132,8 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
         reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
         hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
 
-        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
         hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
         break;
     case HCLGEVF_TYPE_CRQ:
         reg_val = (u32)ring->desc_dma_addr;
@@ -145,8 +145,8 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
         reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
         hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
 
-        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
         hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+        hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
         break;
     }
 

@@ -299,6 +299,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 
     client = handle->client;
 
+    link_state =
+        test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
+
     if (link_state != hdev->hw.mac.link) {
         client->ops->link_status_change(handle, !!link_state);
         hdev->hw.mac.link = link_state;
@@ -1448,6 +1451,8 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
     int i, queue_id;
 
+    set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
     for (i = 0; i < hdev->num_tqps; i++) {
         /* Ring disable */
         queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
@@ -1619,17 +1624,22 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 
         ret = client->ops->init_instance(&hdev->nic);
         if (ret)
-            return ret;
+            goto clear_nic;
+
+        hnae3_set_client_init_flag(client, ae_dev, 1);
 
         if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
             struct hnae3_client *rc = hdev->roce_client;
 
             ret = hclgevf_init_roce_base_info(hdev);
             if (ret)
-                return ret;
+                goto clear_roce;
             ret = rc->ops->init_instance(&hdev->roce);
             if (ret)
-                return ret;
+                goto clear_roce;
+
+            hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
+                                       1);
         }
         break;
     case HNAE3_CLIENT_UNIC:
@@ -1638,7 +1648,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 
         ret = client->ops->init_instance(&hdev->nic);
         if (ret)
-            return ret;
+            goto clear_nic;
+
+        hnae3_set_client_init_flag(client, ae_dev, 1);
         break;
     case HNAE3_CLIENT_ROCE:
         if (hnae3_dev_roce_supported(hdev)) {
@@ -1649,15 +1661,26 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
         if (hdev->roce_client && hdev->nic_client) {
             ret = hclgevf_init_roce_base_info(hdev);
             if (ret)
-                return ret;
+                goto clear_roce;
 
             ret = client->ops->init_instance(&hdev->roce);
             if (ret)
-                return ret;
+                goto clear_roce;
         }
+
+        hnae3_set_client_init_flag(client, ae_dev, 1);
     }
 
     return 0;
+
+clear_nic:
+    hdev->nic_client = NULL;
+    hdev->nic.client = NULL;
+    return ret;
+clear_roce:
+    hdev->roce_client = NULL;
+    hdev->roce.client = NULL;
+    return ret;
 }
 
 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
@@ -1666,13 +1689,19 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
     struct hclgevf_dev *hdev = ae_dev->priv;
 
     /* un-init roce, if it exists */
-    if (hdev->roce_client)
+    if (hdev->roce_client) {
         hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+        hdev->roce_client = NULL;
+        hdev->roce.client = NULL;
+    }
 
     /* un-init nic/unic, if this was not called by roce client */
-    if ((client->ops->uninit_instance) &&
-        (client->type != HNAE3_CLIENT_ROCE))
+    if (client->ops->uninit_instance && hdev->nic_client &&
+        client->type != HNAE3_CLIENT_ROCE) {
         client->ops->uninit_instance(&hdev->nic, 0);
+        hdev->nic_client = NULL;
+        hdev->nic.client = NULL;
+    }
 }
 
 static int hclgevf_pci_init(struct hclgevf_dev *hdev)