Merge branch 'hns3-fixes'

Huazhong Tan says:

====================
net: hns3: fixes for -net

This series includes some bugfixes for the HNS3 ethernet driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c9fd37a945
@@ -264,22 +264,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
 	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
 	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+	struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+	struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
 
-	/* initialize the configuration for interrupt coalescing.
-	 * 1. GL (Interrupt Gap Limiter)
-	 * 2. RL (Interrupt Rate Limiter)
-	 * 3. QL (Interrupt Quantity Limiter)
-	 *
-	 * Default: enable interrupt coalescing self-adaptive and GL
-	 */
-	tx_coal->adapt_enable = 1;
-	rx_coal->adapt_enable = 1;
+	tx_coal->adapt_enable = ptx_coal->adapt_enable;
+	rx_coal->adapt_enable = prx_coal->adapt_enable;
 
-	tx_coal->int_gl = HNS3_INT_GL_50K;
-	rx_coal->int_gl = HNS3_INT_GL_50K;
+	tx_coal->int_gl = ptx_coal->int_gl;
+	rx_coal->int_gl = prx_coal->int_gl;
 
-	rx_coal->flow_level = HNS3_FLOW_LOW;
-	tx_coal->flow_level = HNS3_FLOW_LOW;
+	rx_coal->flow_level = prx_coal->flow_level;
+	tx_coal->flow_level = ptx_coal->flow_level;
 
 	/* device version above V3(include V3), GL can configure 1us
 	 * unit, so uses 1us unit.
@@ -294,8 +289,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
 		rx_coal->ql_enable = 1;
 		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
 		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
-		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
-		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+		tx_coal->int_ql = ptx_coal->int_ql;
+		rx_coal->int_ql = prx_coal->int_ql;
 	}
 }
 
@@ -846,8 +841,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 		      l4.udp->dest == htons(4790))))
 		return false;
 
-	skb_checksum_help(skb);
-
 	return true;
 }
 
@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 			/* the stack computes the IP header already,
 			 * driver calculate l4 checksum when not TSO.
 			 */
-			skb_checksum_help(skb);
-			return 0;
+			return skb_checksum_help(skb);
 		}
 
 		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 		break;
 	case IPPROTO_UDP:
 		if (hns3_tunnel_csum_bug(skb))
-			break;
+			return skb_checksum_help(skb);
 
 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 		/* the stack computes the IP header already,
 		 * driver calculate l4 checksum when not TSO.
 		 */
-		skb_checksum_help(skb);
-		return 0;
+		return skb_checksum_help(skb);
 	}
 
 	return 0;
@@ -3844,6 +3835,34 @@ map_ring_fail:
 	return ret;
 }
 
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+	/* initialize the configuration for interrupt coalescing.
+	 * 1. GL (Interrupt Gap Limiter)
+	 * 2. RL (Interrupt Rate Limiter)
+	 * 3. QL (Interrupt Quantity Limiter)
+	 *
+	 * Default: enable interrupt coalescing self-adaptive and GL
+	 */
+	tx_coal->adapt_enable = 1;
+	rx_coal->adapt_enable = 1;
+
+	tx_coal->int_gl = HNS3_INT_GL_50K;
+	rx_coal->int_gl = HNS3_INT_GL_50K;
+
+	rx_coal->flow_level = HNS3_FLOW_LOW;
+	tx_coal->flow_level = HNS3_FLOW_LOW;
+
+	if (ae_dev->dev_specs.int_ql_max) {
+		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+	}
+}
+
 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
 {
 	struct hnae3_handle *h = priv->ae_handle;
@@ -4295,6 +4314,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
 		goto out_get_ring_cfg;
 	}
 
+	hns3_nic_init_coal_cfg(priv);
+
 	ret = hns3_nic_alloc_vector_data(priv);
 	if (ret) {
 		ret = -ENOMEM;
@@ -4317,12 +4338,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	if (ret)
 		goto out_init_phy;
 
-	ret = register_netdev(netdev);
-	if (ret) {
-		dev_err(priv->dev, "probe register netdev fail!\n");
-		goto out_reg_netdev_fail;
-	}
-
 	/* the device can work without cpu rmap, only aRFS needs it */
 	ret = hns3_set_rx_cpu_rmap(netdev);
 	if (ret)
@@ -4355,17 +4370,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
 		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
 
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(priv->dev, "probe register netdev fail!\n");
+		goto out_reg_netdev_fail;
+	}
+
 	if (netif_msg_drv(handle))
 		hns3_info_show(priv);
 
 	return ret;
 
+out_reg_netdev_fail:
+	hns3_dbg_uninit(handle);
 out_client_start:
 	hns3_free_rx_cpu_rmap(netdev);
 	hns3_nic_uninit_irq(priv);
 out_init_irq_fail:
-	unregister_netdev(netdev);
-out_reg_netdev_fail:
 	hns3_uninit_phy(netdev);
 out_init_phy:
 	hns3_uninit_all_ring(priv);
@@ -4571,31 +4592,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
 	return 0;
 }
 
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
-	/* ethtool only support setting and querying one coal
-	 * configuration for now, so save the vector 0' coal
-	 * configuration here in order to restore it.
-	 */
-	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
-	       sizeof(struct hns3_enet_coalesce));
-	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
-	       sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
-	u16 vector_num = priv->vector_num;
-	int i;
-
-	for (i = 0; i < vector_num; i++) {
-		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
-		       sizeof(struct hns3_enet_coalesce));
-		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
-		       sizeof(struct hns3_enet_coalesce));
-	}
-}
-
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +4650,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	if (ret)
 		goto err_put_ring;
 
-	hns3_restore_coal(priv);
-
 	ret = hns3_nic_init_vector_data(priv);
 	if (ret)
 		goto err_dealloc_vector;
@@ -4721,8 +4715,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 
 	hns3_nic_uninit_vector_data(priv);
 
-	hns3_store_coal(priv);
-
 	hns3_nic_dealloc_vector_data(priv);
 
 	hns3_uninit_all_ring(priv);
@@ -1134,50 +1134,32 @@ static void hns3_get_channels(struct net_device *netdev,
 	h->ae_algo->ops->get_channels(h, ch);
 }
 
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
-				       struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
 {
-	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
 	struct hnae3_handle *h = priv->ae_handle;
-	u16 queue_num = h->kinfo.num_tqps;
 
 	if (hns3_nic_resetting(netdev))
 		return -EBUSY;
 
-	if (queue >= queue_num) {
-		netdev_err(netdev,
-			   "Invalid queue value %u! Queue max id=%u\n",
-			   queue, queue_num - 1);
-		return -EINVAL;
-	}
-
-	tx_vector = priv->ring[queue].tqp_vector;
-	rx_vector = priv->ring[queue_num + queue].tqp_vector;
-
-	cmd->use_adaptive_tx_coalesce =
-		tx_vector->tx_group.coal.adapt_enable;
-	cmd->use_adaptive_rx_coalesce =
-		rx_vector->rx_group.coal.adapt_enable;
-
-	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
-	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+	cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+	cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
+
+	cmd->tx_coalesce_usecs = tx_coal->int_gl;
+	cmd->rx_coalesce_usecs = rx_coal->int_gl;
 
 	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
 	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
 
-	cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
-	cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+	cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+	cmd->rx_max_coalesced_frames = rx_coal->int_ql;
 
 	return 0;
 }
 
-static int hns3_get_coalesce(struct net_device *netdev,
-			     struct ethtool_coalesce *cmd)
-{
-	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
 static int hns3_check_gl_coalesce_para(struct net_device *netdev,
 				       struct ethtool_coalesce *cmd)
 {
@@ -1292,19 +1274,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
 		return ret;
 	}
 
-	ret = hns3_check_ql_coalesce_param(netdev, cmd);
-	if (ret)
-		return ret;
-
-	if (cmd->use_adaptive_tx_coalesce == 1 ||
-	    cmd->use_adaptive_rx_coalesce == 1) {
-		netdev_info(netdev,
-			    "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
-			    cmd->use_adaptive_tx_coalesce,
-			    cmd->use_adaptive_rx_coalesce);
-	}
-
-	return 0;
+	return hns3_check_ql_coalesce_param(netdev, cmd);
 }
 
 static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1320,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
 			     struct ethtool_coalesce *cmd)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
 	u16 queue_num = h->kinfo.num_tqps;
 	int ret;
 	int i;
@@ -1364,6 +1337,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
 	h->kinfo.int_rl_setting =
 		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
 
+	tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+	rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+	tx_coal->int_gl = cmd->tx_coalesce_usecs;
+	rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+	tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+	rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
 	for (i = 0; i < queue_num; i++)
 		hns3_set_coalesce_per_queue(netdev, cmd, i);
 
@@ -710,7 +710,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 	unsigned int flag;
 	int ret = 0;
 
-	memset(&resp_msg, 0, sizeof(resp_msg));
 	/* handle all the mailbox requests in the queue */
 	while (!hclge_cmd_crq_empty(&hdev->hw)) {
 		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +737,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 
 		trace_hclge_pf_mbx_get(hdev, req);
 
+		/* clear the resp_msg before processing every mailbox message */
+		memset(&resp_msg, 0, sizeof(resp_msg));
+
 		switch (req->msg.code) {
 		case HCLGE_MBX_MAP_RING_TO_VECTOR:
 			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,