Merge branch 'hns3-misc'

Huazhong Tan says:

====================
net: hns3: misc updates for -next

This series includes some updates for the HNS3 ethernet driver.

====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-03-29 13:21:01 -07:00
commit 0bc7f8d54a
11 changed files with 324 additions and 182 deletions


@ -579,7 +579,7 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
int (*reset_queue)(struct hnae3_handle *handle);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);


@ -694,7 +694,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
u16 *mss, u32 *type_cs_vlan_tso)
u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
u32 l4_offset, hdr_len;
union l3_hdr_info l3;
@ -750,6 +750,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
(__force __wsum)htonl(l4_paylen));
}
*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;
/* find the txbd field values */
*paylen_fdop_ol4cs = skb->len - hdr_len;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
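For context on the send_bytes value computed above: when a TSO skb is segmented by hardware, every extra segment resends the protocol headers, so the bytes that reach the wire are skb->len plus (gso_segs - 1) * hdr_len, and that is what the driver now feeds to byte-queue accounting instead of the raw skb->len. A minimal stand-alone sketch of the arithmetic, with made-up packet values (only the formula comes from the patch):

#include <stdio.h>

/* Hypothetical TSO skb: 3 segments, 66-byte headers, skb->len of 4266. */
int main(void)
{
	unsigned int gso_segs = 3;	/* assumed segment count */
	unsigned int hdr_len  = 66;	/* assumed L2+L3+L4 header length */
	unsigned int skb_len  = 4266;	/* assumed skb->len */

	/* Same formula as in hns3_set_tso(): headers are repeated per segment. */
	unsigned int send_bytes = (gso_segs - 1) * hdr_len + skb_len;

	printf("bytes reported for BQL: %u\n", send_bytes);	/* prints 4398 */
	return 0;
}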
@ -1076,7 +1078,8 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
}
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
struct sk_buff *skb, struct hns3_desc *desc)
struct sk_buff *skb, struct hns3_desc *desc,
struct hns3_desc_cb *desc_cb)
{
u32 ol_type_vlan_len_msec = 0;
u32 paylen_ol4cs = skb->len;
@ -1105,6 +1108,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1);
}
desc_cb->send_bytes = skb->len;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ol4_proto, il4_proto;
@ -1140,7 +1145,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
&type_cs_vlan_tso);
&type_cs_vlan_tso, &desc_cb->send_bytes);
if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_tso_err++;
@ -1275,30 +1280,29 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
}
static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
u8 max_non_tso_bd_num)
u8 max_non_tso_bd_num, unsigned int bd_num,
unsigned int recursion_level)
{
#define HNS3_MAX_RECURSION_LEVEL 24
struct sk_buff *frag_skb;
unsigned int bd_num = 0;
/* If the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
!skb_has_frag_list(skb) &&
skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
return skb_shinfo(skb)->nr_frags + 1U;
/* The below case will always be linearized, return
* HNS3_MAX_TSO_BD_NUM + 1U to make sure it is linearized.
*/
if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len >
HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
return HNS3_MAX_TSO_BD_NUM + 1U;
if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
return UINT_MAX;
bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
return bd_num;
skb_walk_frags(skb, frag_skb) {
bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
bd_num, recursion_level + 1);
if (bd_num > HNS3_MAX_TSO_BD_NUM)
return bd_num;
}
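The rewritten hns3_tx_bd_num() above recurses through skb_walk_frags() and caps the depth at HNS3_MAX_RECURSION_LEVEL, returning UINT_MAX as a sentinel so the caller can drop pathologically nested fraglists. A stand-alone sketch of that bounded-recursion pattern over a toy fraglist structure (struct and names invented for illustration):

#include <limits.h>
#include <stdio.h>

#define MAX_RECURSION_LEVEL 24		/* mirrors HNS3_MAX_RECURSION_LEVEL */

/* Toy stand-in for an skb that may carry a frag_list of further skbs. */
struct toy_skb {
	unsigned int nr_bds;		/* BDs this skb alone would need */
	struct toy_skb *frag_list;	/* first skb on its frag list */
	struct toy_skb *next;		/* next sibling on a frag list */
};

static unsigned int count_bds(const struct toy_skb *skb, unsigned int bd_num,
			      unsigned int level)
{
	const struct toy_skb *frag;

	/* Refuse to walk arbitrarily deep fraglists. */
	if (level >= MAX_RECURSION_LEVEL)
		return UINT_MAX;

	bd_num += skb->nr_bds;
	for (frag = skb->frag_list; frag; frag = frag->next) {
		bd_num = count_bds(frag, bd_num, level + 1);
		if (bd_num == UINT_MAX)
			return bd_num;
	}

	return bd_num;
}

int main(void)
{
	struct toy_skb frag = { .nr_bds = 2 };
	struct toy_skb head = { .nr_bds = 3, .frag_list = &frag };

	printf("total BDs: %u\n", count_bds(&head, 0, 0));	/* prints 5 */
	return 0;
}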
@ -1358,6 +1362,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
size[i] = skb_frag_size(&shinfo->frags[i]);
}
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb's fraglist has a
* recursion level over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
u64_stats_update_begin(&ring->syncp);
ring->stats.over_max_recursion++;
u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
/* The skb->len has exceeded the hw limitation, linearization
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len >
HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
if (__skb_linearize(skb)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
return 0;
}
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev,
struct sk_buff *skb)
@ -1367,7 +1408,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
unsigned int bd_num;
bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
if (unlikely(bd_num > max_non_tso_bd_num)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
!hns3_skb_need_linearized(skb, bd_size, bd_num,
@ -1376,16 +1417,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
if (__skb_linearize(skb))
if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
bd_num > max_non_tso_bd_num)) {
trace_hns3_over_max_bd(skb);
return -ENOMEM;
}
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
@ -1409,6 +1445,10 @@ out:
return bd_num;
}
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
return -EBUSY;
}
@ -1456,6 +1496,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
struct sk_buff *skb, enum hns_desc_type type)
{
unsigned int size = skb_headlen(skb);
struct sk_buff *frag_skb;
int i, ret, bd_num = 0;
if (size) {
@ -1480,6 +1521,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
bd_num += ret;
}
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
if (unlikely(ret < 0))
return ret;
bd_num += ret;
}
return bd_num;
}
@ -1508,16 +1558,20 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
int pre_ntu, next_to_use_head;
struct sk_buff *frag_skb;
int bd_num = 0;
bool doorbell;
int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
return NETDEV_TX_OK;
}
@ -1527,15 +1581,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
if (unlikely(ret <= 0)) {
if (ret == -EBUSY) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
hns3_tx_doorbell(ring, 0, true);
return NETDEV_TX_BUSY;
} else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
}
hns3_rl_err(netdev, "xmit error: %d!\n", ret);
@ -1544,25 +1591,19 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
desc_cb);
if (unlikely(ret < 0))
goto fill_err;
/* 'ret < 0' means filling error, 'ret == 0' means skb->len is
* zero, which is unlikely, and 'ret > 0' means how many tx desc
* need to be notified to the hw.
*/
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
if (unlikely(ret <= 0))
goto fill_err;
bd_num += ret;
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
if (unlikely(ret < 0))
goto fill_err;
bd_num += ret;
}
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@ -1571,9 +1612,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
netdev_xmit_more());
hns3_tx_doorbell(ring, bd_num, doorbell);
hns3_tx_doorbell(ring, ret, doorbell);
return NETDEV_TX_OK;
@ -1745,11 +1786,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_drop += ring->stats.tx_l4_proto_err;
tx_drop += ring->stats.tx_l2l3l4_err;
tx_drop += ring->stats.tx_tso_err;
tx_drop += ring->stats.over_max_recursion;
tx_drop += ring->stats.hw_limitation;
tx_errors += ring->stats.sw_err_cnt;
tx_errors += ring->stats.tx_vlan_err;
tx_errors += ring->stats.tx_l4_proto_err;
tx_errors += ring->stats.tx_l2l3l4_err;
tx_errors += ring->stats.tx_tso_err;
tx_errors += ring->stats.over_max_recursion;
tx_errors += ring->stats.hw_limitation;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@ -2688,8 +2733,12 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
break;
desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
if (desc_cb->type == DESC_TYPE_SKB) {
(*pkts)++;
(*bytes) += desc_cb->send_bytes;
}
/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
hns3_free_buffer_detach(ring, ntc, budget);
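The reclaim change above is the completion half of the send_bytes rework: byte-queue limits only behave if the bytes handed back at clean time match the bytes reported through __netdev_tx_sent_queue() at transmit time, so both sides now use desc_cb->send_bytes and only SKB-type descriptors are counted. A toy model of that pairing with invented byte counts shows why the two sides must agree:

#include <stdio.h>

/* Toy byte-queue-limit counter: bytes queued minus bytes completed. */
static long bql_in_flight;

static void toy_tx_sent(unsigned int bytes)	 { bql_in_flight += bytes; }
static void toy_tx_completed(unsigned int bytes) { bql_in_flight -= bytes; }

int main(void)
{
	/* Hypothetical TSO packet: skb->len 4266, 4398 bytes on the wire. */
	unsigned int skb_len = 4266;
	unsigned int send_bytes = 4398;

	/* Consistent accounting: report send_bytes on both sides. */
	toy_tx_sent(send_bytes);
	toy_tx_completed(send_bytes);
	printf("consistent accounting: in-flight = %ld\n", bql_in_flight);

	/* Mismatched accounting never drains and eventually stalls the queue. */
	toy_tx_sent(send_bytes);
	toy_tx_completed(skb_len);
	printf("mismatched accounting: in-flight = %ld\n", bql_in_flight);
	return 0;
}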
@ -4455,11 +4504,11 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
int i, j;
int ret;
for (i = 0; i < h->kinfo.num_tqps; i++) {
ret = h->ae_algo->ops->reset_queue(h, i);
if (ret)
return ret;
ret = h->ae_algo->ops->reset_queue(h);
if (ret)
return ret;
for (i = 0; i < h->kinfo.num_tqps; i++) {
hns3_init_ring_hw(&priv->ring[i]);
/* We need to clear tx ring here because self test will


@ -298,7 +298,12 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
union {
u32 page_offset; /* for rx */
u32 send_bytes; /* for tx */
};
u32 length; /* length of the buffer */
u16 reuse_flag;
@ -376,6 +381,8 @@ struct ring_stats {
u64 tx_l4_proto_err;
u64 tx_l2l3l4_err;
u64 tx_tso_err;
u64 over_max_recursion;
u64 hw_limitation;
};
struct {
u64 rx_pkts;


@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
HNS3_TQP_STAT("tso_err", tx_tso_err),
HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
HNS3_TQP_STAT("hw_limitation", hw_limitation),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)


@ -948,10 +948,16 @@ struct hclge_reset_tqp_queue_cmd {
#define HCLGE_CFG_RESET_MAC_B 3
#define HCLGE_CFG_RESET_FUNC_B 7
#define HCLGE_CFG_RESET_RCB_B 1
struct hclge_reset_cmd {
u8 mac_func_reset;
u8 fun_reset_vfid;
u8 rsv[22];
u8 fun_reset_rcb;
u8 rsv;
__le16 fun_reset_rcb_vqid_start;
__le16 fun_reset_rcb_vqid_num;
u8 fun_reset_rcb_return_status;
u8 rsv1[15];
};
#define HCLGE_PF_RESET_DONE_BIT BIT(0)


@ -6439,8 +6439,9 @@ static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
if (ret)
goto out;
hclge_update_fd_list(hdev, HCLGE_FD_ACTIVE, rule->location, rule);
rule->state = HCLGE_FD_ACTIVE;
hdev->fd_active_type = rule->rule_type;
hclge_update_fd_list(hdev, rule->state, rule->location, rule);
out:
spin_unlock_bh(&hdev->fd_rule_lock);
@ -7002,6 +7003,7 @@ static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
rule->action = 0;
rule->vf_id = 0;
rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
rule->state = HCLGE_FD_TO_ADD;
if (tuples->ether_proto == ETH_P_IP) {
if (tuples->ip_proto == IPPROTO_TCP)
rule->flow_type = TCP_V4_FLOW;
@ -7064,8 +7066,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
rule->arfs.flow_id = flow_id;
rule->queue_id = queue_id;
hclge_fd_build_arfs_rule(&new_tuples, rule);
hclge_update_fd_list(hdev, HCLGE_FD_TO_ADD, rule->location,
rule);
hclge_update_fd_list(hdev, rule->state, rule->location, rule);
hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
} else if (rule->queue_id != queue_id) {
rule->queue_id = queue_id;
@ -7796,13 +7797,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
u16 stream_id, bool enable)
{
struct hclge_desc desc;
struct hclge_cfg_com_tqp_queue_cmd *req =
(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id);
@ -7810,20 +7810,30 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
if (enable)
req->enable |= 1U << HCLGE_TQP_ENABLE_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Tqp enable fail, status =%d.\n", ret);
return ret;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int ret;
u16 i;
for (i = 0; i < handle->kinfo.num_tqps; i++) {
ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
if (ret)
return ret;
}
return 0;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
int ret;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@ -7860,14 +7870,12 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
if (ret)
return ret;
kinfo = &vport->nic.kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
ret = hclge_tqp_enable(hdev, i, 0, en);
if (ret)
return ret;
}
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
en ? "enable" : "disable", ret);
return 0;
return ret;
}
static int hclge_set_default_loopback(struct hclge_dev *hdev)
@ -7954,7 +7962,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
spin_lock_bh(&hdev->fd_rule_lock);
@ -7971,8 +7978,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
hclge_reset_tqp(handle);
hclge_config_mac_tnl_int(hdev, false);
@ -10346,7 +10352,7 @@ out:
return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
bool enable)
{
struct hclge_reset_tqp_queue_cmd *req;
@ -10402,94 +10408,114 @@ u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
u16 reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
u16 i;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
for (i = 0; i < handle->kinfo.num_tqps; i++) {
queue_gid = hclge_covert_handle_qid_global(handle, i);
ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to send reset tqp cmd, ret = %d\n",
ret);
return ret;
}
ret = hclge_tqp_enable(hdev, queue_id, 0, false);
if (ret) {
dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
return ret;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
/* Wait for tqp hw reset */
usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
"wait for tqp hw reset timeout\n");
return -ETIME;
}
ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to deassert soft reset, ret = %d\n",
ret);
return ret;
}
reset_try_times = 0;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"Send reset tqp cmd fail, ret = %d\n", ret);
return ret;
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
/* Wait for tqp hw reset */
usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
return ret;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
if (ret)
dev_err(&hdev->pdev->dev,
"Deassert the soft reset fail, ret = %d\n", ret);
return ret;
return 0;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
static int hclge_reset_rcb(struct hnae3_handle *handle)
{
struct hnae3_handle *handle = &vport->nic;
#define HCLGE_RESET_RCB_NOT_SUPPORT 0U
#define HCLGE_RESET_RCB_SUCCESS 1U
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
struct hclge_reset_cmd *req;
struct hclge_desc desc;
u8 return_status;
u16 queue_gid;
int ret;
if (queue_id >= handle->kinfo.num_tqps) {
dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
queue_id);
return;
}
queue_gid = hclge_covert_handle_qid_global(handle, 0);
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
req = (struct hclge_reset_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_warn(&hdev->pdev->dev,
"Send reset tqp cmd fail, ret = %d\n", ret);
return;
dev_err(&hdev->pdev->dev,
"failed to send rcb reset cmd, ret = %d\n", ret);
return ret;
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
return_status = req->fun_reset_rcb_return_status;
if (return_status == HCLGE_RESET_RCB_SUCCESS)
return 0;
/* Wait for tqp hw reset */
usleep_range(1000, 1200);
if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
return_status);
return -EIO;
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
return;
/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
* again to reset all tqps
*/
return hclge_reset_tqp_cmd(handle);
}
int hclge_reset_tqp(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int ret;
/* only need to disable PF's tqp */
if (!vport->vport_id) {
ret = hclge_tqp_enable(handle, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to disable tqp, ret = %d\n", ret);
return ret;
}
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
if (ret)
dev_warn(&hdev->pdev->dev,
"Deassert the soft reset fail, ret = %d\n", ret);
return hclge_reset_rcb(handle);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
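The hclge_reset_tqp()/hclge_reset_rcb() rework above follows a try-new-then-fall-back pattern: the PF first issues the batched RCB reset, and only when the firmware reports that the command is not supported does it fall back to the old per-queue loop in hclge_reset_tqp_cmd(). A stand-alone sketch of that decision flow with stubbed firmware calls (all toy_* names are invented):

#include <stdio.h>

#define RESET_RCB_NOT_SUPPORT	0u	/* firmware predates the command */
#define RESET_RCB_SUCCESS	1u

/* Stand-in for the batched RCB-reset command; pretend the firmware is old. */
static int toy_send_rcb_reset(unsigned int *return_status)
{
	*return_status = RESET_RCB_NOT_SUPPORT;
	return 0;			/* the command itself went through */
}

/* Stand-in for the legacy one-queue-at-a-time reset loop. */
static int toy_reset_each_tqp(unsigned int num_tqps)
{
	printf("falling back: resetting %u queues one by one\n", num_tqps);
	return 0;
}

static int toy_reset_tqp(unsigned int num_tqps)
{
	unsigned int status;
	int ret = toy_send_rcb_reset(&status);

	if (ret)
		return ret;
	if (status == RESET_RCB_SUCCESS)
		return 0;		/* new firmware reset everything at once */
	if (status != RESET_RCB_NOT_SUPPORT)
		return -1;		/* unexpected status, treat as an error */

	/* old firmware: keep the previous per-queue behaviour */
	return toy_reset_each_tqp(num_tqps);
}

int main(void)
{
	return toy_reset_tqp(16);
}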
@ -11929,7 +11955,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE 1
#define REG_NUM_REMAIN_MASK 3
#define BD_LIST_MAX_NUM 30
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
@ -12023,15 +12048,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
int data_len_per_desc, bd_num, i;
int bd_num_list[BD_LIST_MAX_NUM];
int *bd_num_list;
u32 data_len;
int ret;
bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
if (!bd_num_list)
return -ENOMEM;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
return ret;
goto out;
}
data_len_per_desc = sizeof_field(struct hclge_desc, data);
@ -12042,6 +12071,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
}
out:
kfree(bd_num_list);
return ret;
}
@ -12049,16 +12080,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
int bd_num, bd_num_max, buf_len, i;
int bd_num_list[BD_LIST_MAX_NUM];
struct hclge_desc *desc_src;
int *bd_num_list;
u32 *reg = data;
int ret;
bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
if (!bd_num_list)
return -ENOMEM;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
return ret;
goto out;
}
bd_num_max = bd_num_list[0];
@ -12067,8 +12102,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src)
return -ENOMEM;
if (!desc_src) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@ -12084,6 +12121,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
}
kfree(desc_src);
out:
kfree(bd_num_list);
return ret;
}
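The two hunks above trade the fixed bd_num_list[BD_LIST_MAX_NUM] stack array for a heap buffer sized by dfx_reg_type_num, which in turn means every early return has to be routed through a label that frees the buffer. A condensed, stand-alone sketch of that allocate/err-goto/free shape (the toy_* helpers and the 24-byte figure are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the firmware query; pretend each register type needs 2 BDs. */
static int toy_query_bd_num(int *bd_num_list, unsigned int type_num)
{
	for (unsigned int i = 0; i < type_num; i++)
		bd_num_list[i] = 2;
	return 0;
}

static int toy_get_reg_len(unsigned int type_num, int *len)
{
	int *bd_num_list;
	int ret;

	/* size the buffer by the real count instead of a fixed maximum */
	bd_num_list = calloc(type_num, sizeof(int));
	if (!bd_num_list)
		return -ENOMEM;

	ret = toy_query_bd_num(bd_num_list, type_num);
	if (ret)
		goto out;		/* every error path funnels through the free */

	*len = 0;
	for (unsigned int i = 0; i < type_num; i++)
		*len += bd_num_list[i] * 24;	/* assumed bytes per BD */

out:
	free(bd_num_list);
	return ret;
}

int main(void)
{
	int len = 0;
	int ret = toy_get_reg_len(10, &len);

	printf("ret=%d len=%d\n", ret, len);	/* ret=0 len=480 */
	return 0;
}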


@ -1053,8 +1053,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_reset_tqp(struct hnae3_handle *handle);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);


@ -550,14 +550,32 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE 1U
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
u16 queue_id;
int ret;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
resp_msg->len = sizeof(u8);
hclge_reset_vf_queue(vport, queue_id);
/* pf will reset vf's all queues at a time. So it is unnecessary
* to reset queues if queue_id > 0, just return success.
*/
if (queue_id > 0)
return 0;
ret = hclge_reset_tqp(handle);
if (ret)
dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);
return ret;
}
static int hclge_reset_vf(struct hclge_vport *vport)
@ -783,7 +801,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
hclge_mbx_reset_vf_queue(vport, req);
ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
break;
case HCLGE_MBX_RESET:
ret = hclge_reset_vf(vport);


@ -664,15 +664,6 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
/* if user not set rss, the rss_size should compare with the
* valid msi numbers to ensure one to one map between tqp and
* irq as default.
*/
if (!kinfo->req_rss_size)
max_rss_size = min_t(u16, max_rss_size,
(hdev->num_nic_msi - 1) /
kinfo->tc_info.num_tc);
/* Set to the maximum specification value (max_rss_size). */
kinfo->rss_size = max_rss_size;
}


@ -223,11 +223,14 @@ struct hclgevf_rss_indirection_table_cmd {
};
#define HCLGEVF_RSS_TC_OFFSET_S 0
#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
#define HCLGEVF_RSS_TC_SIZE_S 12
#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
struct hclgevf_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];


@ -706,6 +706,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
(tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
0x1);
hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
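The extra hnae3_set_bit() above is needed because the tc_size field in each rss_tc_mode word is only 3 bits wide (HCLGEVF_RSS_TC_SIZE_M covers bits 12..14), so the log2 of an rss_size above 128 no longer fits; bit 3 of tc_size is therefore carried separately in HCLGEVF_RSS_TC_SIZE_MSB_B (bit 15). A small stand-alone sketch of that encoding, using the constants from the header change and a hypothetical rss_size of 512:

#include <stdio.h>

#define RSS_TC_SIZE_S		12
#define RSS_TC_SIZE_M		(0x7u << RSS_TC_SIZE_S)	/* 3-bit size field */
#define RSS_TC_SIZE_MSB_B	15			/* carries tc_size bit 3 */
#define RSS_TC_SIZE_MSB_OFFSET	3

int main(void)
{
	unsigned int rss_size = 512;	/* hypothetical queues per TC */
	unsigned int tc_size = 0;
	unsigned int mode = 0;

	while ((1u << tc_size) < rss_size)	/* tc_size = log2(512) = 9 */
		tc_size++;

	mode |= (tc_size << RSS_TC_SIZE_S) & RSS_TC_SIZE_M;	/* low 3 bits */
	mode |= ((tc_size >> RSS_TC_SIZE_MSB_OFFSET) & 0x1u) << RSS_TC_SIZE_MSB_B;

	printf("tc_size=%u mode=0x%04x\n", tc_size, mode);	/* mode=0x9000 */
	return 0;
}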
@ -1240,12 +1243,11 @@ static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
}
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
u16 stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
struct hclgevf_desc desc;
int status;
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
@ -1256,12 +1258,22 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
if (enable)
req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
"TQP enable fail, status =%d.\n", status);
return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}
return status;
static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
u16 i;
for (i = 0; i < handle->kinfo.num_tqps; i++) {
ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
if (ret)
return ret;
}
return 0;
}
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
@ -1710,20 +1722,39 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
u8 return_status = 0;
int ret;
u16 i;
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
ret = hclgevf_tqp_enable(handle, false);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
ret);
return ret;
}
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
memcpy(send_msg.data, &queue_id, sizeof(queue_id));
return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
sizeof(return_status));
if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
return ret;
for (i = 1; i < handle->kinfo.num_tqps; i++) {
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
memcpy(send_msg.data, &i, sizeof(i));
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (ret)
return ret;
}
return 0;
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
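Together with the PF-side hclge_mbx_reset_vf_queue() change earlier in the series, the hclgevf_reset_tqp() rewrite above forms a small compatibility handshake: a new PF resets every VF queue when it sees the request for queue 0 and reports HCLGEVF_RESET_ALL_QUEUE_DONE, while an old PF returns no such flag and the VF falls back to requesting each remaining queue individually. A stand-alone sketch of that negotiation (all toy_* names invented, PF behaviour simulated by a flag):

#include <stdbool.h>
#include <stdio.h>

#define RESET_ALL_QUEUE_DONE	1u

/*
 * Stand-in for the PF mailbox handler: a new PF resets all VF queues when
 * asked about queue 0 and reports DONE; requests for queue_id > 0 are no-ops.
 * An old PF resets just the named queue and returns no flag at all.
 */
static unsigned int toy_pf_handle_queue_reset(unsigned int queue_id, bool new_pf)
{
	if (!new_pf) {
		printf("old PF: resetting queue %u\n", queue_id);
		return 0;
	}
	if (queue_id == 0)
		printf("new PF: resetting all VF queues at once\n");
	return RESET_ALL_QUEUE_DONE;
}

/* Stand-in for the VF side: try the batched scheme, then fall back per queue. */
static void toy_vf_reset_tqp(unsigned int num_tqps, bool new_pf)
{
	if (toy_pf_handle_queue_reset(0, new_pf) == RESET_ALL_QUEUE_DONE)
		return;				/* the PF already did everything */

	for (unsigned int i = 1; i < num_tqps; i++)
		toy_pf_handle_queue_reset(i, new_pf);	/* legacy per-queue loop */
}

int main(void)
{
	toy_vf_reset_tqp(4, true);	/* new PF: a single mailbox round trip */
	toy_vf_reset_tqp(4, false);	/* old PF: the VF walks queues 1..3 itself */
	return 0;
}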
@ -2636,14 +2667,11 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int i;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
if (hdev->reset_type != HNAE3_VF_RESET)
for (i = 0; i < handle->kinfo.num_tqps; i++)
if (hclgevf_reset_tqp(handle, i))
break;
hclgevf_reset_tqp(handle);
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);