// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
2018-07-19 22:47:04 +08:00
|
|
|
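/* per-vector MSI-X interrupt handler: hand the work off to NAPI by
 * scheduling the poll routine of the TQP vector bound to this IRQ.
 */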
static irqreturn_t hns3_irq_handle(int irq, void *vector)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2018-07-19 22:47:04 +08:00
|
|
|
struct hns3_enet_tqp_vector *tqp_vector = vector;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:42 +08:00
|
|
|
napi_schedule_irqoff(&tqp_vector->napi);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2018-09-20 01:29:47 +08:00
|
|
|
/* This callback function is used to record affinity changes to the irq
 * affinity masks when the irq_set_affinity_notifier function is used.
 */
|
|
|
|
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
|
|
|
|
const cpumask_t *mask)
|
|
|
|
{
|
|
|
|
struct hns3_enet_tqp_vector *tqp_vectors =
|
|
|
|
container_of(notify, struct hns3_enet_tqp_vector,
|
|
|
|
affinity_notify);
|
|
|
|
|
|
|
|
tqp_vectors->affinity_mask = *mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_nic_irq_affinity_release(struct kref *ref)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
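/* release the IRQ of every initialized TQP vector and clear its
 * affinity notifier and affinity hint.
 */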
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
|
|
|
|
{
|
|
|
|
struct hns3_enet_tqp_vector *tqp_vectors;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < priv->vector_num; i++) {
|
|
|
|
tqp_vectors = &priv->tqp_vector[i];
|
|
|
|
|
|
|
|
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
|
|
|
|
continue;
|
|
|
|
|
2018-09-20 01:29:47 +08:00
|
|
|
/* clear the affinity notifier and affinity mask */
|
|
|
|
irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
|
|
|
|
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* release the irq resource */
|
|
|
|
free_irq(tqp_vectors->vector_irq, tqp_vectors);
|
|
|
|
tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
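/* request an IRQ for every TQP vector that has rings attached, name it
 * after the netdev and its ring type (TxRx/Rx/Tx), and register the
 * affinity notifier and affinity hint for it.
 */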
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
|
|
|
|
{
|
|
|
|
struct hns3_enet_tqp_vector *tqp_vectors;
|
|
|
|
int txrx_int_idx = 0;
|
|
|
|
int rx_int_idx = 0;
|
|
|
|
int tx_int_idx = 0;
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
for (i = 0; i < priv->vector_num; i++) {
|
|
|
|
tqp_vectors = &priv->tqp_vector[i];
|
|
|
|
|
|
|
|
if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
|
|
|
|
snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
|
|
|
|
"%s-%s-%d", priv->netdev->name, "TxRx",
|
|
|
|
txrx_int_idx++);
|
|
|
|
txrx_int_idx++;
|
|
|
|
} else if (tqp_vectors->rx_group.ring) {
|
|
|
|
snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
|
|
|
|
"%s-%s-%d", priv->netdev->name, "Rx",
|
|
|
|
rx_int_idx++);
|
|
|
|
} else if (tqp_vectors->tx_group.ring) {
|
|
|
|
snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
|
|
|
|
"%s-%s-%d", priv->netdev->name, "Tx",
|
|
|
|
tx_int_idx++);
|
|
|
|
} else {
|
|
|
|
/* Skip this unused q_vector */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
|
|
|
|
|
|
|
|
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
|
|
|
|
tqp_vectors->name,
|
|
|
|
tqp_vectors);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(priv->netdev, "request irq(%d) fail\n",
|
|
|
|
tqp_vectors->vector_irq);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-09-20 01:29:47 +08:00
|
|
|
tqp_vectors->affinity_notify.notify =
|
|
|
|
hns3_nic_irq_affinity_notify;
|
|
|
|
tqp_vectors->affinity_notify.release =
|
|
|
|
hns3_nic_irq_affinity_release;
|
|
|
|
irq_set_affinity_notifier(tqp_vectors->vector_irq,
|
|
|
|
&tqp_vectors->affinity_notify);
|
|
|
|
irq_set_affinity_hint(tqp_vectors->vector_irq,
|
|
|
|
&tqp_vectors->affinity_mask);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
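/* write the vector's mask register: callers pass 1 to unmask (enable)
 * the vector interrupt and 0 to mask it.
 */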
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
u32 mask_en)
|
|
|
|
{
|
|
|
|
writel(mask_en, tqp_vector->mask_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
|
|
|
|
{
|
|
|
|
napi_enable(&tqp_vector->napi);
|
|
|
|
|
|
|
|
/* enable vector */
|
|
|
|
hns3_mask_vector_irq(tqp_vector, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
|
|
|
|
{
|
|
|
|
/* disable vector */
|
|
|
|
hns3_mask_vector_irq(tqp_vector, 0);
|
|
|
|
|
|
|
|
disable_irq(tqp_vector->vector_irq);
|
|
|
|
napi_disable(&tqp_vector->napi);
|
|
|
|
}
|
|
|
|
|
2018-01-12 16:23:10 +08:00
|
|
|
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
u32 rl_value)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2018-01-12 16:23:10 +08:00
|
|
|
u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* this defines the configuration for RL (Interrupt Rate Limiter).
 * RL defines the rate of interrupts, i.e. the number of interrupts per
 * second. GL and RL (Rate Limiter) are two ways to achieve interrupt
 * coalescing.
 */
|
2018-01-12 16:23:10 +08:00
|
|
|
|
2018-03-09 10:37:03 +08:00
|
|
|
if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
|
|
|
|
!tqp_vector->rx_group.coal.gl_adapt_enable)
|
2018-01-12 16:23:10 +08:00
|
|
|
/* According to the hardware, the range of rl_reg is
|
|
|
|
* 0-59 and the unit is 4.
|
|
|
|
*/
|
|
|
|
rl_reg |= HNS3_INT_RL_ENABLE_MASK;
|
|
|
|
|
|
|
|
writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
|
|
|
|
}
|
|
|
|
|
|
|
|
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
u32 gl_value)
|
|
|
|
{
|
|
|
|
u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
|
|
|
|
|
|
|
|
writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
|
|
|
|
}
|
|
|
|
|
|
|
|
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
u32 gl_value)
|
|
|
|
{
|
|
|
|
u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
|
|
|
|
|
|
|
|
writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-01-12 16:23:11 +08:00
|
|
|
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
struct hns3_nic_priv *priv)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
/* initialize the configuration for interrupt coalescing.
|
|
|
|
* 1. GL (Interrupt Gap Limiter)
|
|
|
|
* 2. RL (Interrupt Rate Limiter)
|
|
|
|
*/
|
|
|
|
|
2018-01-12 16:23:11 +08:00
|
|
|
/* Default: enable interrupt coalescing self-adaptive and GL */
|
2018-03-09 10:37:03 +08:00
|
|
|
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
|
|
|
|
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
|
2018-01-12 16:23:11 +08:00
|
|
|
|
2018-03-09 10:37:03 +08:00
|
|
|
tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
|
|
|
|
tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
|
2018-01-12 16:23:11 +08:00
|
|
|
|
2018-03-09 10:37:03 +08:00
|
|
|
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
|
|
|
|
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-03-09 10:37:02 +08:00
|
|
|
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
|
|
|
|
struct hns3_nic_priv *priv)
|
|
|
|
{
|
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
|
|
|
|
|
|
hns3_set_vector_coalesce_tx_gl(tqp_vector,
|
2018-03-09 10:37:03 +08:00
|
|
|
tqp_vector->tx_group.coal.int_gl);
|
2018-03-09 10:37:02 +08:00
|
|
|
hns3_set_vector_coalesce_rx_gl(tqp_vector,
|
2018-03-09 10:37:03 +08:00
|
|
|
tqp_vector->rx_group.coal.int_gl);
|
2018-03-09 10:37:02 +08:00
|
|
|
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
|
|
|
|
}
|
|
|
|
|
2017-09-27 09:45:32 +08:00
|
|
|
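/* sync the TC to queue mapping with the stack and set the real number
 * of TX/RX queues in use, which is rss_size * num_tc.
 */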
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-09-27 09:45:32 +08:00
|
|
|
struct hnae3_knic_private_info *kinfo = &h->kinfo;
|
|
|
|
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
|
2018-07-06 18:27:55 +08:00
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
if (kinfo->num_tc <= 1) {
|
|
|
|
netdev_reset_tc(netdev);
|
|
|
|
} else {
|
|
|
|
ret = netdev_set_num_tc(netdev, kinfo->num_tc);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev,
|
|
|
|
"netdev_set_num_tc fail, ret=%d!\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < HNAE3_MAX_TC; i++) {
|
|
|
|
if (!kinfo->tc_info[i].enable)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
netdev_set_tc_queue(netdev,
|
|
|
|
kinfo->tc_info[i].tc,
|
|
|
|
kinfo->tc_info[i].tqp_count,
|
|
|
|
kinfo->tc_info[i].tqp_offset);
|
|
|
|
}
|
|
|
|
}
|
2017-09-27 09:45:32 +08:00
|
|
|
|
|
|
|
ret = netif_set_real_num_tx_queues(netdev, queue_size);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev,
|
|
|
|
"netif_set_real_num_tx_queues fail, ret=%d!\n",
|
|
|
|
ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = netif_set_real_num_rx_queues(netdev, queue_size);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev,
|
|
|
|
"netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-08 19:41:54 +08:00
|
|
|
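/* return the per-TC queue count available to the user, limited by the
 * allocated TQPs and the max RSS size reported by the ae_dev.
 */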
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
|
|
|
|
{
|
2018-09-27 02:28:39 +08:00
|
|
|
u16 alloc_tqps, max_rss_size, rss_size;
|
2018-03-08 19:41:54 +08:00
|
|
|
|
2018-09-27 02:28:39 +08:00
|
|
|
h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
|
|
|
|
rss_size = alloc_tqps / h->kinfo.num_tc;
|
2018-03-08 19:41:54 +08:00
|
|
|
|
2018-09-27 02:28:39 +08:00
|
|
|
return min_t(u16, rss_size, max_rss_size);
|
2018-03-08 19:41:54 +08:00
|
|
|
}
|
|
|
|
|
2018-11-07 12:06:11 +08:00
|
|
|
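/* enable the hardware ring (RCB) of a TQP by setting HNS3_RING_EN_B */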
static void hns3_tqp_enable(struct hnae3_queue *tqp)
|
|
|
|
{
|
|
|
|
u32 rcb_reg;
|
|
|
|
|
|
|
|
rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
|
|
|
|
rcb_reg |= BIT(HNS3_RING_EN_B);
|
|
|
|
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_tqp_disable(struct hnae3_queue *tqp)
|
|
|
|
{
|
|
|
|
u32 rcb_reg;
|
|
|
|
|
|
|
|
rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
|
|
|
|
rcb_reg &= ~BIT(HNS3_RING_EN_B);
|
|
|
|
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static int hns3_nic_net_up(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
|
|
int i, j;
|
|
|
|
int ret;
|
|
|
|
|
2018-05-26 02:43:04 +08:00
|
|
|
ret = hns3_nic_reset_all_ring(h);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* get irq resource for all vectors */
|
|
|
|
ret = hns3_nic_init_irq(priv);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-01-27 00:49:20 +08:00
|
|
|
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* enable the vectors */
|
|
|
|
for (i = 0; i < priv->vector_num; i++)
|
|
|
|
hns3_vector_enable(&priv->tqp_vector[i]);
|
|
|
|
|
2018-11-07 12:06:11 +08:00
|
|
|
/* enable rcb */
|
|
|
|
for (j = 0; j < h->kinfo.num_tqps; j++)
|
|
|
|
hns3_tqp_enable(h->kinfo.tqp[j]);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* start the ae_dev */
|
|
|
|
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
|
|
|
|
if (ret)
|
|
|
|
goto out_start_err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_start_err:
|
2019-01-27 00:49:20 +08:00
|
|
|
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
|
2018-11-07 12:06:11 +08:00
|
|
|
while (j--)
|
|
|
|
hns3_tqp_disable(h->kinfo.tqp[j]);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
for (j = i - 1; j >= 0; j--)
|
|
|
|
hns3_vector_disable(&priv->tqp_vector[j]);
|
|
|
|
|
|
|
|
hns3_nic_uninit_irq(priv);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-02-23 17:22:08 +08:00
|
|
|
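/* map each TX queue to the CPUs in its TQP vector's affinity mask via
 * XPS, so the stack selects TX queues that match the vector's CPU
 * affinity.
 */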
static void hns3_config_xps(struct hns3_nic_priv *priv)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < priv->vector_num; i++) {
|
|
|
|
struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
|
|
|
|
struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
|
|
|
|
|
|
|
|
while (ring) {
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = netif_set_xps_queue(priv->netdev,
|
|
|
|
&tqp_vector->affinity_mask,
|
|
|
|
ring->tqp->tqp_index);
|
|
|
|
if (ret)
|
|
|
|
netdev_warn(priv->netdev,
|
|
|
|
"set xps queue failed: %d", ret);
|
|
|
|
|
|
|
|
ring = ring->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static int hns3_nic_net_open(struct net_device *netdev)
|
|
|
|
{
|
2018-12-20 11:51:58 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2018-07-06 18:27:55 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
|
|
|
struct hnae3_knic_private_info *kinfo;
|
|
|
|
int i, ret;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2018-11-07 12:06:12 +08:00
|
|
|
if (hns3_nic_resetting(netdev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
2017-09-27 09:45:32 +08:00
|
|
|
ret = hns3_nic_set_real_num_queue(netdev);
|
|
|
|
if (ret)
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = hns3_nic_net_up(netdev);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev,
|
|
|
|
"hns net up fail, ret=%d!\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-07-06 18:27:55 +08:00
|
|
|
kinfo = &h->kinfo;
|
|
|
|
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
|
|
|
|
netdev_set_prio_tc_map(netdev, i,
|
|
|
|
kinfo->prio_tc[i]);
|
|
|
|
}
|
|
|
|
|
2018-12-20 11:51:58 +08:00
|
|
|
if (h->ae_algo->ops->set_timer_task)
|
|
|
|
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
|
|
|
|
|
2019-02-23 17:22:08 +08:00
|
|
|
hns3_config_xps(priv);
|
2017-08-02 23:59:45 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_nic_net_down(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2018-11-07 12:06:11 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
const struct hnae3_ae_ops *ops;
|
|
|
|
int i;
|
|
|
|
|
2018-05-26 02:43:04 +08:00
|
|
|
/* disable vectors */
|
|
|
|
for (i = 0; i < priv->vector_num; i++)
|
|
|
|
hns3_vector_disable(&priv->tqp_vector[i]);
|
2018-11-07 12:06:11 +08:00
|
|
|
|
|
|
|
/* disable rcb */
|
|
|
|
for (i = 0; i < h->kinfo.num_tqps; i++)
|
|
|
|
hns3_tqp_disable(h->kinfo.tqp[i]);
|
2018-05-26 02:43:04 +08:00
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* stop ae_dev */
|
|
|
|
ops = priv->ae_handle->ae_algo->ops;
|
|
|
|
if (ops->stop)
|
|
|
|
ops->stop(priv->ae_handle);
|
|
|
|
|
|
|
|
/* free irq resources */
|
|
|
|
hns3_nic_uninit_irq(priv);
|
2018-05-26 02:43:04 +08:00
|
|
|
|
|
|
|
hns3_clear_all_ring(priv->ae_handle);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_net_stop(struct net_device *netdev)
|
|
|
|
{
|
2018-11-09 22:07:52 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2018-12-20 11:51:58 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2018-11-09 22:07:52 +08:00
|
|
|
|
|
|
|
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
|
|
|
|
return 0;
|
|
|
|
|
2018-12-20 11:51:58 +08:00
|
|
|
if (h->ae_algo->ops->set_timer_task)
|
|
|
|
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
netif_tx_stop_all_queues(netdev);
|
|
|
|
netif_carrier_off(netdev);
|
|
|
|
|
|
|
|
hns3_nic_net_down(netdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_uc_sync(struct net_device *netdev,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
if (h->ae_algo->ops->add_uc_addr)
|
|
|
|
return h->ae_algo->ops->add_uc_addr(h, addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_uc_unsync(struct net_device *netdev,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
if (h->ae_algo->ops->rm_uc_addr)
|
|
|
|
return h->ae_algo->ops->rm_uc_addr(h, addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_mc_sync(struct net_device *netdev,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2017-08-10 17:56:14 +08:00
|
|
|
if (h->ae_algo->ops->add_mc_addr)
|
2017-08-02 23:59:45 +08:00
|
|
|
return h->ae_algo->ops->add_mc_addr(h, addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_mc_unsync(struct net_device *netdev,
|
|
|
|
const unsigned char *addr)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2017-08-10 17:56:14 +08:00
|
|
|
if (h->ae_algo->ops->rm_mc_addr)
|
2017-08-02 23:59:45 +08:00
|
|
|
return h->ae_algo->ops->rm_mc_addr(h, addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-10-12 22:34:04 +08:00
|
|
|
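/* translate netdev->flags (promisc/allmulti) into the HNAE3 flag bits
 * used to program the hardware promiscuous and VLAN filter state.
 */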
static u8 hns3_get_netdev_flags(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
u8 flags = 0;
|
|
|
|
|
|
|
|
if (netdev->flags & IFF_PROMISC) {
|
2019-01-27 00:49:14 +08:00
|
|
|
flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
|
2018-10-12 22:34:04 +08:00
|
|
|
} else {
|
|
|
|
flags |= HNAE3_VLAN_FLTR;
|
|
|
|
if (netdev->flags & IFF_ALLMULTI)
|
|
|
|
flags |= HNAE3_USER_MPE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2017-10-09 15:44:01 +08:00
|
|
|
static void hns3_nic_set_rx_mode(struct net_device *netdev)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2018-10-12 22:34:04 +08:00
|
|
|
u8 new_flags;
|
|
|
|
int ret;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2018-10-12 22:34:04 +08:00
|
|
|
new_flags = hns3_get_netdev_flags(netdev);
|
|
|
|
|
|
|
|
ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
|
|
|
|
if (ret) {
|
2017-08-02 23:59:45 +08:00
|
|
|
netdev_err(netdev, "sync uc address fail\n");
|
2018-10-12 22:34:04 +08:00
|
|
|
if (ret == -ENOSPC)
|
|
|
|
new_flags |= HNAE3_OVERFLOW_UPE;
|
|
|
|
}
|
|
|
|
|
2018-06-02 00:52:10 +08:00
|
|
|
if (netdev->flags & IFF_MULTICAST) {
|
2018-10-12 22:34:04 +08:00
|
|
|
ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
|
|
|
|
hns3_nic_mc_unsync);
|
|
|
|
if (ret) {
|
2017-08-02 23:59:45 +08:00
|
|
|
netdev_err(netdev, "sync mc address fail\n");
|
2018-10-12 22:34:04 +08:00
|
|
|
if (ret == -ENOSPC)
|
|
|
|
new_flags |= HNAE3_OVERFLOW_MPE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* With user-mode promiscuous mode enabled, vlan filtering is disabled
 * to let all packets in. With MAC-VLAN table overflow promiscuous mode
 * enabled, vlan filtering stays enabled.
 */
|
|
|
|
hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
|
|
|
|
h->netdev_flags = new_flags;
|
2019-01-27 00:49:14 +08:00
|
|
|
hns3_update_promisc_mode(netdev, new_flags);
|
2018-10-12 22:34:04 +08:00
|
|
|
}
|
|
|
|
|
2018-10-30 21:50:50 +08:00
|
|
|
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
|
2018-10-12 22:34:04 +08:00
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
|
|
|
|
|
|
if (h->ae_algo->ops->set_promisc_mode) {
|
2018-10-30 21:50:50 +08:00
|
|
|
return h->ae_algo->ops->set_promisc_mode(h,
|
|
|
|
promisc_flags & HNAE3_UPE,
|
|
|
|
promisc_flags & HNAE3_MPE);
|
2018-10-12 22:34:04 +08:00
|
|
|
}
|
2018-10-30 21:50:50 +08:00
|
|
|
|
|
|
|
return 0;
|
2018-10-12 22:34:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
|
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
|
|
bool last_state;
|
|
|
|
|
|
|
|
if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
|
|
|
|
last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
|
|
|
|
if (enable != last_state) {
|
|
|
|
netdev_info(netdev,
|
|
|
|
"%s vlan filter\n",
|
|
|
|
enable ? "enable" : "disable");
|
|
|
|
h->ae_algo->ops->enable_vlan_filter(h, enable);
|
|
|
|
}
|
2018-06-02 00:52:10 +08:00
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
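/* prepare a GSO skb for hardware TSO: clear the checksums the hardware
 * will refill, derive the payload length and MSS, and set the TSO bit
 * in the TX descriptor field.
 */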
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
|
|
|
|
u16 *mss, u32 *type_cs_vlan_tso)
|
|
|
|
{
|
|
|
|
u32 l4_offset, hdr_len;
|
|
|
|
union l3_hdr_info l3;
|
|
|
|
union l4_hdr_info l4;
|
|
|
|
u32 l4_paylen;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!skb_is_gso(skb))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = skb_cow_head(skb, 0);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
l3.hdr = skb_network_header(skb);
|
|
|
|
l4.hdr = skb_transport_header(skb);
|
|
|
|
|
|
|
|
/* Software should clear the IPv4's checksum field when tso is
|
|
|
|
* needed.
|
|
|
|
*/
|
|
|
|
if (l3.v4->version == 4)
|
|
|
|
l3.v4->check = 0;
|
|
|
|
|
|
|
|
/* tunnel packet.*/
|
|
|
|
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
|
|
|
|
SKB_GSO_GRE_CSUM |
|
|
|
|
SKB_GSO_UDP_TUNNEL |
|
|
|
|
SKB_GSO_UDP_TUNNEL_CSUM)) {
|
|
|
|
if ((!(skb_shinfo(skb)->gso_type &
|
|
|
|
SKB_GSO_PARTIAL)) &&
|
|
|
|
(skb_shinfo(skb)->gso_type &
|
|
|
|
SKB_GSO_UDP_TUNNEL_CSUM)) {
|
|
|
|
/* Software should clear the udp's checksum
|
|
|
|
* field when tso is needed.
|
|
|
|
*/
|
|
|
|
l4.udp->check = 0;
|
|
|
|
}
|
|
|
|
/* reset l3&l4 pointers from outer to inner headers */
|
|
|
|
l3.hdr = skb_inner_network_header(skb);
|
|
|
|
l4.hdr = skb_inner_transport_header(skb);
|
|
|
|
|
|
|
|
/* Software should clear the IPv4's checksum field when
|
|
|
|
* tso is needed.
|
|
|
|
*/
|
|
|
|
if (l3.v4->version == 4)
|
|
|
|
l3.v4->check = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* normal or tunnel packet*/
|
|
|
|
l4_offset = l4.hdr - skb->data;
|
2019-02-23 17:22:09 +08:00
|
|
|
hdr_len = (l4.tcp->doff << 2) + l4_offset;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
/* remove payload length from inner pseudo checksum when tso*/
|
|
|
|
l4_paylen = skb->len - l4_offset;
|
|
|
|
csum_replace_by_diff(&l4.tcp->check,
|
|
|
|
(__force __wsum)htonl(l4_paylen));
|
|
|
|
|
|
|
|
/* find the txbd field values */
|
|
|
|
*paylen = skb->len - hdr_len;
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
/* get MSS for TSO */
|
|
|
|
*mss = skb_shinfo(skb)->gso_size;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-18 19:31:39 +08:00
|
|
|
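/* work out the outer and inner L4 protocols of the skb, walking IPv6
 * extension headers when necessary.
 */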
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
|
|
|
|
u8 *il4_proto)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2019-01-31 04:55:41 +08:00
|
|
|
union l3_hdr_info l3;
|
2017-08-02 23:59:45 +08:00
|
|
|
unsigned char *l4_hdr;
|
|
|
|
unsigned char *exthdr;
|
|
|
|
u8 l4_proto_tmp;
|
|
|
|
__be16 frag_off;
|
|
|
|
|
|
|
|
/* find outer header point */
|
|
|
|
l3.hdr = skb_network_header(skb);
|
2018-05-02 02:55:58 +08:00
|
|
|
l4_hdr = skb_transport_header(skb);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
|
|
exthdr = l3.hdr + sizeof(*l3.v6);
|
|
|
|
l4_proto_tmp = l3.v6->nexthdr;
|
|
|
|
if (l4_hdr != exthdr)
|
|
|
|
ipv6_skip_exthdr(skb, exthdr - skb->data,
|
|
|
|
&l4_proto_tmp, &frag_off);
|
|
|
|
} else if (skb->protocol == htons(ETH_P_IP)) {
|
|
|
|
l4_proto_tmp = l3.v4->protocol;
|
2017-08-18 19:31:39 +08:00
|
|
|
} else {
|
|
|
|
return -EINVAL;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
*ol4_proto = l4_proto_tmp;
|
|
|
|
|
|
|
|
/* tunnel packet */
|
|
|
|
if (!skb->encapsulation) {
|
|
|
|
*il4_proto = 0;
|
2017-08-18 19:31:39 +08:00
|
|
|
return 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* find inner header point */
|
|
|
|
l3.hdr = skb_inner_network_header(skb);
|
|
|
|
l4_hdr = skb_inner_transport_header(skb);
|
|
|
|
|
|
|
|
if (l3.v6->version == 6) {
|
|
|
|
exthdr = l3.hdr + sizeof(*l3.v6);
|
|
|
|
l4_proto_tmp = l3.v6->nexthdr;
|
|
|
|
if (l4_hdr != exthdr)
|
|
|
|
ipv6_skip_exthdr(skb, exthdr - skb->data,
|
|
|
|
&l4_proto_tmp, &frag_off);
|
|
|
|
} else if (l3.v4->version == 4) {
|
|
|
|
l4_proto_tmp = l3.v4->protocol;
|
|
|
|
}
|
|
|
|
|
|
|
|
*il4_proto = l4_proto_tmp;
|
2017-08-18 19:31:39 +08:00
|
|
|
|
|
|
|
return 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-06-02 00:52:09 +08:00
|
|
|
/* when skb->encapsulation is 0 and skb->ip_summed is CHECKSUM_PARTIAL
 * for a udp packet whose dest port is the IANA-assigned VXLAN port,
 * the hardware is expected to do the checksum offload, but it will not
 * do so when the udp dest port is 4789.
 */
|
|
|
|
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
|
|
|
|
{
|
2019-01-31 04:55:41 +08:00
|
|
|
union l4_hdr_info l4;
|
2018-06-02 00:52:09 +08:00
|
|
|
|
|
|
|
l4.hdr = skb_transport_header(skb);
|
|
|
|
|
2019-03-22 06:51:39 +08:00
|
|
|
if (!(!skb->encapsulation &&
|
|
|
|
l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
|
2018-06-02 00:52:09 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
skb_checksum_help(skb);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:47 +08:00
|
|
|
static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
|
|
|
|
u32 *ol_type_vlan_len_msec)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2019-05-06 10:48:47 +08:00
|
|
|
u32 l2_len, l3_len, l4_len;
|
|
|
|
unsigned char *il2_hdr;
|
2019-01-31 04:55:41 +08:00
|
|
|
union l3_hdr_info l3;
|
2019-05-06 10:48:47 +08:00
|
|
|
union l4_hdr_info l4;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
l3.hdr = skb_network_header(skb);
|
2019-05-06 10:48:47 +08:00
|
|
|
l4.hdr = skb_transport_header(skb);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:47 +08:00
|
|
|
/* compute OL2 header size, defined in 2 Bytes */
|
|
|
|
l2_len = l3.hdr - skb->data;
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
|
|
|
|
|
|
|
|
/* compute OL3 header size, defined in 4 Bytes */
|
|
|
|
l3_len = l4.hdr - l3.hdr;
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:47 +08:00
|
|
|
il2_hdr = skb_inner_mac_header(skb);
|
|
|
|
/* compute OL4 header size, defined in 4 Bytes. */
|
|
|
|
l4_len = il2_hdr - l4.hdr;
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
|
|
|
|
|
|
|
|
/* define outer network header type */
|
|
|
|
if (skb->protocol == htons(ETH_P_IP)) {
|
|
|
|
if (skb_is_gso(skb))
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*ol_type_vlan_len_msec,
|
2019-05-06 10:48:47 +08:00
|
|
|
HNS3_TXD_OL3T_S,
|
|
|
|
HNS3_OL3T_IPV4_CSUM);
|
|
|
|
else
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*ol_type_vlan_len_msec,
|
2019-05-06 10:48:47 +08:00
|
|
|
HNS3_TXD_OL3T_S,
|
|
|
|
HNS3_OL3T_IPV4_NO_CSUM);
|
|
|
|
|
|
|
|
} else if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
|
|
|
|
HNS3_OL3T_IPV6);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ol4_proto == IPPROTO_UDP)
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
|
|
|
|
HNS3_TUN_MAC_IN_UDP);
|
|
|
|
else if (ol4_proto == IPPROTO_GRE)
|
|
|
|
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
|
|
|
|
HNS3_TUN_NVGRE);
|
|
|
|
}
|
|
|
|
|
|
|
|
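/* fill the L2/L3/L4 type, length and checksum-enable fields of the TX
 * descriptor for both plain and tunnelled packets.
 */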
static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
|
|
|
|
u8 il4_proto, u32 *type_cs_vlan_tso,
|
|
|
|
u32 *ol_type_vlan_len_msec)
|
|
|
|
{
|
|
|
|
unsigned char *l2_hdr = skb->data;
|
|
|
|
u32 l4_proto = ol4_proto;
|
|
|
|
union l4_hdr_info l4;
|
|
|
|
union l3_hdr_info l3;
|
|
|
|
u32 l2_len, l3_len;
|
|
|
|
|
|
|
|
l4.hdr = skb_transport_header(skb);
|
|
|
|
l3.hdr = skb_network_header(skb);
|
|
|
|
|
|
|
|
/* handle encapsulation skb */
|
|
|
|
if (skb->encapsulation) {
|
|
|
|
/* If this is a not UDP/GRE encapsulation skb */
|
|
|
|
if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
|
2017-08-02 23:59:45 +08:00
|
|
|
/* drop the skb tunnel packet if the hardware doesn't support
 * it, because the hardware can't calculate the csum when doing
 * TSO.
 */
|
|
|
|
if (skb_is_gso(skb))
|
|
|
|
return -EDOM;
|
|
|
|
|
|
|
|
/* the stack computes the IP header already, the driver
 * calculates the l4 checksum when not doing TSO.
 */
|
|
|
|
skb_checksum_help(skb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:47 +08:00
|
|
|
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
|
|
|
|
|
|
|
|
/* switch to inner header */
|
|
|
|
l2_hdr = skb_inner_mac_header(skb);
|
2017-08-02 23:59:45 +08:00
|
|
|
l3.hdr = skb_inner_network_header(skb);
|
2019-05-06 10:48:47 +08:00
|
|
|
l4.hdr = skb_inner_transport_header(skb);
|
2017-08-02 23:59:45 +08:00
|
|
|
l4_proto = il4_proto;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (l3.v4->version == 4) {
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
|
|
|
|
HNS3_L3T_IPV4);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
/* the stack computes the IP header already, the only time we
|
|
|
|
* need the hardware to recompute it is in the case of TSO.
|
|
|
|
*/
|
|
|
|
if (skb_is_gso(skb))
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
|
2017-08-02 23:59:45 +08:00
|
|
|
} else if (l3.v6->version == 6) {
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
|
|
|
|
HNS3_L3T_IPV6);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:47 +08:00
|
|
|
/* compute inner(/normal) L2 header size, defined in 2 Bytes */
|
|
|
|
l2_len = l3.hdr - l2_hdr;
|
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
|
|
|
|
|
|
|
|
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
|
|
|
|
l3_len = l4.hdr - l3.hdr;
|
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
|
|
|
|
|
|
|
|
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
|
2017-08-02 23:59:45 +08:00
|
|
|
switch (l4_proto) {
|
|
|
|
case IPPROTO_TCP:
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
|
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
|
|
|
|
HNS3_L4T_TCP);
|
2019-05-06 10:48:47 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
|
|
|
|
l4.tcp->doff);
|
2017-08-02 23:59:45 +08:00
|
|
|
break;
|
|
|
|
case IPPROTO_UDP:
|
2018-06-02 00:52:09 +08:00
|
|
|
if (hns3_tunnel_csum_bug(skb))
|
|
|
|
break;
|
|
|
|
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
|
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
|
|
|
|
HNS3_L4T_UDP);
|
2019-05-06 10:48:47 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
|
|
|
|
(sizeof(struct udphdr) >> 2));
|
2017-08-02 23:59:45 +08:00
|
|
|
break;
|
|
|
|
case IPPROTO_SCTP:
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
|
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
|
|
|
|
HNS3_L4T_SCTP);
|
2019-05-06 10:48:47 +08:00
|
|
|
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
|
|
|
|
(sizeof(struct sctphdr) >> 2));
|
2017-08-02 23:59:45 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* drop the skb tunnel packet if the hardware doesn't support it,
 * because the hardware can't calculate the csum when doing TSO.
 */
|
|
|
|
if (skb_is_gso(skb))
|
|
|
|
return -EDOM;
|
|
|
|
|
|
|
|
/* the stack computes the IP header already,
 * the driver calculates the l4 checksum when not doing TSO.
 */
|
|
|
|
skb_checksum_help(skb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
|
|
|
|
{
|
|
|
|
/* Config bd buffer end */
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
|
|
|
|
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2017-12-22 12:21:48 +08:00
|
|
|
static int hns3_fill_desc_vtags(struct sk_buff *skb,
|
|
|
|
struct hns3_enet_ring *tx_ring,
|
|
|
|
u32 *inner_vlan_flag,
|
|
|
|
u32 *out_vlan_flag,
|
|
|
|
u16 *inner_vtag,
|
|
|
|
u16 *out_vtag)
|
|
|
|
{
|
|
|
|
#define HNS3_TX_VLAN_PRIO_SHIFT 13
struct hnae3_handle *handle = tx_ring->tqp->handle;
|
|
|
|
|
|
|
|
/* Due to a HW limitation, if port based insert VLAN is enabled, only
 * one VLAN header is allowed in the skb, otherwise it will cause a RAS
 * error.
 */
|
|
|
|
if (unlikely(skb_vlan_tagged_multi(skb) &&
|
|
|
|
handle->port_base_vlan_state ==
|
|
|
|
HNAE3_PORT_BASE_VLAN_ENABLE))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-12-22 12:21:48 +08:00
|
|
|
if (skb->protocol == htons(ETH_P_8021Q) &&
|
|
|
|
!(tx_ring->tqp->handle->kinfo.netdev->features &
|
|
|
|
NETIF_F_HW_VLAN_CTAG_TX)) {
|
|
|
|
/* When HW VLAN acceleration is turned off, and the stack
|
|
|
|
* sets the protocol to 802.1q, the driver just need to
|
|
|
|
* set the protocol to the encapsulated ethertype.
|
|
|
|
*/
|
|
|
|
skb->protocol = vlan_get_protocol(skb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (skb_vlan_tag_present(skb)) {
|
|
|
|
u16 vlan_tag;
|
|
|
|
|
|
|
|
vlan_tag = skb_vlan_tag_get(skb);
|
|
|
|
vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
|
|
|
|
|
|
|
|
/* Based on hw strategy, use out_vtag in two layer tag case,
|
|
|
|
* and use inner_vtag in one tag case.
|
|
|
|
*/
|
|
|
|
if (skb->protocol == htons(ETH_P_8021Q)) {
if (handle->port_base_vlan_state ==
|
|
|
|
HNAE3_PORT_BASE_VLAN_DISABLE){
|
|
|
|
hns3_set_field(*out_vlan_flag,
|
|
|
|
HNS3_TXD_OVLAN_B, 1);
|
|
|
|
*out_vtag = vlan_tag;
|
|
|
|
} else {
|
|
|
|
hns3_set_field(*inner_vlan_flag,
|
|
|
|
HNS3_TXD_VLAN_B, 1);
|
|
|
|
*inner_vtag = vlan_tag;
|
|
|
|
}
|
2017-12-22 12:21:48 +08:00
|
|
|
} else {
|
2019-02-23 17:22:13 +08:00
|
|
|
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
|
2017-12-22 12:21:48 +08:00
|
|
|
*inner_vtag = vlan_tag;
|
|
|
|
}
|
|
|
|
} else if (skb->protocol == htons(ETH_P_8021Q)) {
|
|
|
|
struct vlan_ethhdr *vhdr;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = skb_cow_head(skb, 0);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(rc < 0))
|
2017-12-22 12:21:48 +08:00
|
|
|
return rc;
|
|
|
|
vhdr = (struct vlan_ethhdr *)skb->data;
|
|
|
|
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
|
|
|
|
<< HNS3_TX_VLAN_PRIO_SHIFT);
|
|
|
|
}
|
|
|
|
|
|
|
|
skb->protocol = vlan_get_protocol(skb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
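/* map one linear buffer or page frag and fill the TX BD(s) for it,
 * splitting buffers larger than HNS3_MAX_BD_SIZE across several BDs.
 */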
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
2018-10-16 19:58:49 +08:00
|
|
|
int size, int frag_end, enum hns_desc_type type)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
|
|
|
|
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
|
2018-10-16 19:58:49 +08:00
|
|
|
struct device *dev = ring_to_dev(ring);
|
|
|
|
struct skb_frag_struct *frag;
|
2018-10-16 19:58:50 +08:00
|
|
|
unsigned int frag_buf_num;
|
2019-02-23 17:22:10 +08:00
|
|
|
int k, sizeoflast;
|
2018-10-16 19:58:49 +08:00
|
|
|
dma_addr_t dma;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
if (type == DESC_TYPE_SKB) {
|
2019-02-23 17:22:10 +08:00
|
|
|
struct sk_buff *skb = (struct sk_buff *)priv;
|
|
|
|
u32 ol_type_vlan_len_msec = 0;
|
|
|
|
u32 type_cs_vlan_tso = 0;
|
|
|
|
u32 paylen = skb->len;
|
|
|
|
u16 inner_vtag = 0;
|
|
|
|
u16 out_vtag = 0;
|
|
|
|
u16 mss = 0;
|
|
|
|
int ret;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2017-12-22 12:21:48 +08:00
|
|
|
ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
|
|
|
|
&ol_type_vlan_len_msec,
|
|
|
|
&inner_vtag, &out_vtag);
|
|
|
|
if (unlikely(ret))
|
|
|
|
return ret;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
2019-02-23 17:22:10 +08:00
|
|
|
u8 ol4_proto, il4_proto;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
skb_reset_mac_len(skb);
|
|
|
|
|
2017-08-18 19:31:39 +08:00
|
|
|
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2017-08-18 19:31:39 +08:00
|
|
|
return ret;
|
2019-05-06 10:48:47 +08:00
|
|
|
|
|
|
|
ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
|
|
|
|
&type_cs_vlan_tso,
|
|
|
|
&ol_type_vlan_len_msec);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = hns3_set_tso(skb, &paylen, &mss,
|
|
|
|
&type_cs_vlan_tso);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set txbd */
|
|
|
|
desc->tx.ol_type_vlan_len_msec =
|
|
|
|
cpu_to_le32(ol_type_vlan_len_msec);
|
|
|
|
desc->tx.type_cs_vlan_tso_len =
|
|
|
|
cpu_to_le32(type_cs_vlan_tso);
|
2017-10-09 15:44:00 +08:00
|
|
|
desc->tx.paylen = cpu_to_le32(paylen);
|
2017-08-02 23:59:45 +08:00
|
|
|
desc->tx.mss = cpu_to_le16(mss);
|
2017-12-22 12:21:48 +08:00
|
|
|
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
|
|
|
|
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
|
2018-10-16 19:58:49 +08:00
|
|
|
|
|
|
|
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
|
|
|
|
} else {
|
|
|
|
frag = (struct skb_frag_struct *)priv;
|
|
|
|
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
|
|
|
|
}
|
|
|
|
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(dma_mapping_error(ring->dev, dma))) {
|
2018-10-16 19:58:49 +08:00
|
|
|
ring->stats.sw_err_cnt++;
|
|
|
|
return -ENOMEM;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-10-16 19:58:52 +08:00
|
|
|
desc_cb->length = size;
|
|
|
|
|
2019-04-04 16:17:53 +08:00
|
|
|
if (likely(size <= HNS3_MAX_BD_SIZE)) {
|
|
|
|
u16 bdtp_fe_sc_vld_ra_ri = 0;
|
|
|
|
|
|
|
|
desc_cb->priv = priv;
|
|
|
|
desc_cb->dma = dma;
|
|
|
|
desc_cb->type = type;
|
|
|
|
desc->addr = cpu_to_le64(dma);
|
|
|
|
desc->tx.send_size = cpu_to_le16(size);
|
|
|
|
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
|
|
|
|
desc->tx.bdtp_fe_sc_vld_ra_ri =
|
|
|
|
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
|
|
|
|
|
|
|
|
ring_ptr_move_fw(ring, next_to_use);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-03-21 11:28:43 +08:00
|
|
|
frag_buf_num = hns3_tx_bd_count(size);
|
2019-02-23 17:22:09 +08:00
|
|
|
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
|
2018-10-16 19:58:50 +08:00
|
|
|
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
|
|
|
|
|
|
|
|
/* When frag size is bigger than hardware limit, split this frag */
|
|
|
|
for (k = 0; k < frag_buf_num; k++) {
|
2019-04-04 16:17:53 +08:00
|
|
|
u16 bdtp_fe_sc_vld_ra_ri = 0;
|
|
|
|
|
2018-10-16 19:58:50 +08:00
|
|
|
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
|
|
|
|
desc_cb->priv = priv;
|
|
|
|
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
|
|
|
|
desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
|
|
|
|
DESC_TYPE_SKB : DESC_TYPE_PAGE;
|
|
|
|
|
|
|
|
/* now, fill the descriptor */
|
|
|
|
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
|
2018-10-16 19:58:52 +08:00
|
|
|
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
|
|
|
|
(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
|
2018-10-16 19:58:50 +08:00
|
|
|
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
|
|
|
|
frag_end && (k == frag_buf_num - 1) ?
|
|
|
|
1 : 0);
|
|
|
|
desc->tx.bdtp_fe_sc_vld_ra_ri =
|
|
|
|
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
|
|
|
|
|
|
|
|
/* move ring pointer to next.*/
|
|
|
|
ring_ptr_move_fw(ring, next_to_use);
|
|
|
|
|
|
|
|
desc_cb = &ring->desc_cb[ring->next_to_use];
|
|
|
|
desc = &ring->desc[ring->next_to_use];
|
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
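/* count how many TX BDs this skb needs, taking into account frags that
 * must be split because they exceed HNS3_MAX_BD_SIZE.
 */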
static int hns3_nic_bd_num(struct sk_buff *skb)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2019-05-06 10:48:41 +08:00
|
|
|
int size = skb_headlen(skb);
|
|
|
|
int i, bd_num;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
/* if the total len is within the max bd limit */
|
|
|
|
if (likely(skb->len <= HNS3_MAX_BD_SIZE))
|
|
|
|
return skb_shinfo(skb)->nr_frags + 1;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
bd_num = hns3_tx_bd_count(size);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
|
|
|
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
|
|
|
|
int frag_bd_num;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
size = skb_frag_size(frag);
|
|
|
|
frag_bd_num = hns3_tx_bd_count(size);
|
|
|
|
|
|
|
|
if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
|
2019-01-27 00:49:19 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
bd_num += frag_bd_num;
|
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
return bd_num;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:44 +08:00
|
|
|
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
if (!skb->encapsulation)
|
|
|
|
return skb_transport_offset(skb) + tcp_hdrlen(skb);
|
|
|
|
|
|
|
|
return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* HW needs every continuous group of 8 buffers to be larger than MSS;
 * we simplify this by ensuring that skb_headlen plus the first
 * continuous 7 frags is larger than gso header len + mss, and that each
 * remaining continuous group of 7 frags is larger than MSS, except for
 * the last 7 frags.
 */
|
|
|
|
static bool hns3_skb_need_linearized(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
|
|
|
|
unsigned int tot_len = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < bd_limit; i++)
|
|
|
|
tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
|
|
|
|
|
|
|
|
/* ensure headlen + the first 7 frags is greater than mss + header
|
|
|
|
* and the first 7 frags is greater than mss.
|
|
|
|
*/
|
|
|
|
if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
|
|
|
|
hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/* ensure the remaining continuous 7 buffer is greater than mss */
|
|
|
|
for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
|
|
|
|
tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
|
|
|
|
tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
|
|
|
|
|
|
|
|
if (tot_len < skb_shinfo(skb)->gso_size)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
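/* check that the ring has room for the skb; if the skb needs more BDs
 * than the hardware allows per frame, fall back to copying it into a
 * linear skb first.
 */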
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
|
|
|
|
struct sk_buff **out_skb)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct sk_buff *skb = *out_skb;
|
2019-05-06 10:48:41 +08:00
|
|
|
int bd_num;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
bd_num = hns3_nic_bd_num(skb);
|
|
|
|
if (bd_num < 0)
|
|
|
|
return bd_num;
|
|
|
|
|
|
|
|
if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
|
|
|
|
struct sk_buff *new_skb;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:44 +08:00
|
|
|
if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
|
|
|
|
goto out;
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
bd_num = hns3_tx_bd_count(skb->len);
|
|
|
|
if (unlikely(ring_space(ring) < bd_num))
|
2019-01-27 00:49:19 +08:00
|
|
|
return -EBUSY;
|
|
|
|
/* manually split the packet to be sent */
|
|
|
|
new_skb = skb_copy(skb, GFP_ATOMIC);
|
|
|
|
if (!new_skb)
|
|
|
|
return -ENOMEM;
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
*out_skb = new_skb;
|
2019-05-06 10:48:41 +08:00
|
|
|
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.tx_copy++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
2019-01-27 00:49:19 +08:00
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:44 +08:00
|
|
|
out:
|
2019-05-06 10:48:41 +08:00
|
|
|
if (unlikely(ring_space(ring) < bd_num))
|
2017-08-02 23:59:45 +08:00
|
|
|
return -EBUSY;
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
return bd_num;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-10-16 19:58:51 +08:00
|
|
|
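/* roll back the TX BDs already filled for a failed transmit and unmap
 * their DMA buffers.
 */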
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct device *dev = ring_to_dev(ring);
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ring->desc_num; i++) {
|
|
|
|
/* check if this is where we started */
|
|
|
|
if (ring->next_to_use == next_to_use_orig)
|
|
|
|
break;
|
|
|
|
|
2019-05-06 10:48:48 +08:00
|
|
|
/* rollback one */
|
|
|
|
ring_ptr_move_bw(ring, next_to_use);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* unmap the descriptor dma address */
|
|
|
|
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
|
|
|
|
dma_unmap_single(dev,
|
|
|
|
ring->desc_cb[ring->next_to_use].dma,
|
|
|
|
ring->desc_cb[ring->next_to_use].length,
|
|
|
|
DMA_TO_DEVICE);
|
2018-10-16 19:58:52 +08:00
|
|
|
else if (ring->desc_cb[ring->next_to_use].length)
|
2017-08-02 23:59:45 +08:00
|
|
|
dma_unmap_page(dev,
|
|
|
|
ring->desc_cb[ring->next_to_use].dma,
|
|
|
|
ring->desc_cb[ring->next_to_use].length,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
|
2018-10-16 19:58:52 +08:00
|
|
|
ring->desc_cb[ring->next_to_use].length = 0;
|
2019-05-06 10:48:48 +08:00
|
|
|
ring->desc_cb[ring->next_to_use].dma = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-20 10:19:21 +08:00
|
|
|
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
|
|
|
struct hns3_nic_ring_data *ring_data =
|
|
|
|
&tx_ring_data(priv, skb->queue_mapping);
|
|
|
|
struct hns3_enet_ring *ring = ring_data->ring;
|
|
|
|
struct netdev_queue *dev_queue;
|
|
|
|
struct skb_frag_struct *frag;
|
|
|
|
int next_to_use_head;
|
|
|
|
int buf_num;
|
|
|
|
int seg_num;
|
|
|
|
int size;
|
|
|
|
int ret;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Prefetch the data used later */
|
|
|
|
prefetch(skb->data);
|
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
|
|
|
|
if (unlikely(buf_num <= 0)) {
|
|
|
|
if (buf_num == -EBUSY) {
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.tx_busy++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
goto out_net_tx_busy;
|
|
|
|
} else if (buf_num == -ENOMEM) {
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.sw_err_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:41 +08:00
|
|
|
if (net_ratelimit())
|
|
|
|
netdev_err(netdev, "xmit error: %d!\n", buf_num);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
goto out_err_tx_ok;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No. of segments (plus a header) */
|
|
|
|
seg_num = skb_shinfo(skb)->nr_frags + 1;
|
|
|
|
/* Fill the first part */
|
|
|
|
size = skb_headlen(skb);
|
|
|
|
|
|
|
|
next_to_use_head = ring->next_to_use;
|
|
|
|
|
2019-02-23 17:22:11 +08:00
|
|
|
ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
|
|
|
|
DESC_TYPE_SKB);
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2019-05-06 10:48:48 +08:00
|
|
|
goto fill_err;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
/* Fill the fragments */
|
|
|
|
for (i = 1; i < seg_num; i++) {
|
|
|
|
frag = &skb_shinfo(skb)->frags[i - 1];
|
|
|
|
size = skb_frag_size(frag);
|
2018-10-16 19:58:49 +08:00
|
|
|
|
2019-02-23 17:22:11 +08:00
|
|
|
ret = hns3_fill_desc(ring, frag, size,
|
|
|
|
seg_num - 1 == i ? 1 : 0,
|
|
|
|
DESC_TYPE_PAGE);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-02-23 17:22:12 +08:00
|
|
|
if (unlikely(ret))
|
2019-05-06 10:48:48 +08:00
|
|
|
goto fill_err;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Complete translate all packets */
|
|
|
|
dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
|
|
|
|
netdev_tx_sent_queue(dev_queue, skb->len);
|
|
|
|
|
|
|
|
wmb(); /* Commit all data before submit */
|
|
|
|
|
2018-07-02 15:50:26 +08:00
|
|
|
hnae3_queue_xmit(ring->tqp, buf_num);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
|
2019-05-06 10:48:48 +08:00
|
|
|
fill_err:
|
2018-10-16 19:58:51 +08:00
|
|
|
hns3_clear_desc(ring, next_to_use_head);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
out_err_tx_ok:
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
|
|
|
|
out_net_tx_busy:
|
|
|
|
netif_stop_subqueue(netdev, ring_data->queue_index);
|
|
|
|
smp_mb(); /* Commit all data before submit */
|
|
|
|
|
|
|
|
return NETDEV_TX_BUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
struct sockaddr *mac_addr = p;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
2018-06-02 00:52:03 +08:00
|
|
|
if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
|
|
|
|
netdev_info(netdev, "already using mac address %pM\n",
|
|
|
|
mac_addr->sa_data);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-10 11:29:22 +08:00
|
|
|
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
|
2017-08-02 23:59:45 +08:00
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-09-20 01:29:51 +08:00
|
|
|
static int hns3_nic_do_ioctl(struct net_device *netdev,
|
|
|
|
struct ifreq *ifr, int cmd)
|
|
|
|
{
|
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
|
|
|
|
|
|
|
if (!netif_running(netdev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!h->ae_algo->ops->do_ioctl)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static int hns3_nic_set_features(struct net_device *netdev,
|
|
|
|
netdev_features_t features)
|
|
|
|
{
|
2018-01-12 16:23:16 +08:00
|
|
|
netdev_features_t changed = netdev->features ^ features;
|
2017-08-02 23:59:45 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2017-12-22 12:21:47 +08:00
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
2019-02-02 22:39:28 +08:00
|
|
|
bool enable;
|
2017-12-22 12:21:47 +08:00
|
|
|
int ret;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2018-11-15 17:29:23 +08:00
|
|
|
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
|
2019-02-02 22:39:28 +08:00
|
|
|
enable = !!(features & NETIF_F_GRO_HW);
|
|
|
|
ret = h->ae_algo->ops->set_gro_en(h, enable);
|
2018-11-15 17:29:23 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-12 16:23:17 +08:00
|
|
|
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
|
|
|
|
h->ae_algo->ops->enable_vlan_filter) {
|
2019-02-02 22:39:28 +08:00
|
|
|
enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
|
|
|
|
h->ae_algo->ops->enable_vlan_filter(h, enable);
|
2018-01-12 16:23:16 +08:00
|
|
|
}
|
2018-01-05 18:18:05 +08:00
|
|
|
|
2018-01-12 16:23:17 +08:00
|
|
|
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
|
|
|
|
h->ae_algo->ops->enable_hw_strip_rxvtag) {
|
2019-02-02 22:39:28 +08:00
|
|
|
enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
|
|
|
|
ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
|
2017-12-22 12:21:47 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-10-01 19:46:47 +08:00
|
|
|
if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
|
2019-02-02 22:39:28 +08:00
|
|
|
enable = !!(features & NETIF_F_NTUPLE);
|
|
|
|
h->ae_algo->ops->enable_fd(h, enable);
|
2018-10-01 19:46:47 +08:00
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
netdev->features = features;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-09 14:50:59 +08:00
|
|
|
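/* .ndo_get_stats64 handler: refresh the hardware statistics and aggregate
* the per-ring TX/RX software counters into the rtnl_link_stats64 struct.
*/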
static void hns3_nic_get_stats64(struct net_device *netdev,
|
|
|
|
struct rtnl_link_stats64 *stats)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
|
|
|
int queue_num = priv->ae_handle->kinfo.num_tqps;
|
2018-01-05 18:18:10 +08:00
|
|
|
struct hnae3_handle *handle = priv->ae_handle;
|
2017-08-02 23:59:45 +08:00
|
|
|
struct hns3_enet_ring *ring;
|
2019-01-23 07:39:29 +08:00
|
|
|
u64 rx_length_errors = 0;
|
|
|
|
u64 rx_crc_errors = 0;
|
|
|
|
u64 rx_multicast = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
unsigned int start;
|
2019-01-23 07:39:29 +08:00
|
|
|
u64 tx_errors = 0;
|
|
|
|
u64 rx_errors = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
unsigned int idx;
|
|
|
|
u64 tx_bytes = 0;
|
|
|
|
u64 rx_bytes = 0;
|
|
|
|
u64 tx_pkts = 0;
|
|
|
|
u64 rx_pkts = 0;
|
2018-01-05 18:18:12 +08:00
|
|
|
u64 tx_drop = 0;
|
|
|
|
u64 rx_drop = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2018-01-05 18:18:11 +08:00
|
|
|
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
|
|
|
|
return;
|
|
|
|
|
2018-01-05 18:18:10 +08:00
|
|
|
handle->ae_algo->ops->update_stats(handle, &netdev->stats);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
for (idx = 0; idx < queue_num; idx++) {
|
|
|
|
/* fetch the tx stats */
|
|
|
|
ring = priv->ring_data[idx].ring;
|
|
|
|
do {
|
2017-08-18 19:31:37 +08:00
|
|
|
start = u64_stats_fetch_begin_irq(&ring->syncp);
|
2017-08-02 23:59:45 +08:00
|
|
|
tx_bytes += ring->stats.tx_bytes;
|
|
|
|
tx_pkts += ring->stats.tx_pkts;
|
2018-01-05 18:18:12 +08:00
|
|
|
tx_drop += ring->stats.sw_err_cnt;
|
2019-01-23 07:39:29 +08:00
|
|
|
tx_errors += ring->stats.sw_err_cnt;
|
2017-08-02 23:59:45 +08:00
|
|
|
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
|
|
|
|
|
|
|
|
/* fetch the rx stats */
|
|
|
|
ring = priv->ring_data[idx + queue_num].ring;
|
|
|
|
do {
|
2017-08-18 19:31:37 +08:00
|
|
|
start = u64_stats_fetch_begin_irq(&ring->syncp);
|
2017-08-02 23:59:45 +08:00
|
|
|
rx_bytes += ring->stats.rx_bytes;
|
|
|
|
rx_pkts += ring->stats.rx_pkts;
|
2018-01-05 18:18:12 +08:00
|
|
|
rx_drop += ring->stats.non_vld_descs;
|
|
|
|
rx_drop += ring->stats.l2_err;
|
2019-01-23 07:39:29 +08:00
|
|
|
rx_errors += ring->stats.non_vld_descs;
|
|
|
|
rx_errors += ring->stats.l2_err;
|
|
|
|
rx_crc_errors += ring->stats.l2_err;
|
|
|
|
rx_crc_errors += ring->stats.l3l4_csum_err;
|
|
|
|
rx_multicast += ring->stats.rx_multicast;
|
|
|
|
rx_length_errors += ring->stats.err_pkt_len;
|
2017-08-02 23:59:45 +08:00
|
|
|
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
|
|
|
|
}
|
|
|
|
|
|
|
|
stats->tx_bytes = tx_bytes;
|
|
|
|
stats->tx_packets = tx_pkts;
|
|
|
|
stats->rx_bytes = rx_bytes;
|
|
|
|
stats->rx_packets = rx_pkts;
|
|
|
|
|
2019-01-23 07:39:29 +08:00
|
|
|
stats->rx_errors = rx_errors;
|
|
|
|
stats->multicast = rx_multicast;
|
|
|
|
stats->rx_length_errors = rx_length_errors;
|
|
|
|
stats->rx_crc_errors = rx_crc_errors;
|
2017-08-02 23:59:45 +08:00
|
|
|
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
|
|
|
|
|
2019-01-23 07:39:29 +08:00
|
|
|
stats->tx_errors = tx_errors;
|
|
|
|
stats->rx_dropped = rx_drop;
|
|
|
|
stats->tx_dropped = tx_drop;
|
2017-08-02 23:59:45 +08:00
|
|
|
stats->collisions = netdev->stats.collisions;
|
|
|
|
stats->rx_over_errors = netdev->stats.rx_over_errors;
|
|
|
|
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
|
|
|
|
stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
|
|
|
|
stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
|
|
|
|
stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
|
|
|
|
stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
|
|
|
|
stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
|
|
|
|
stats->tx_window_errors = netdev->stats.tx_window_errors;
|
|
|
|
stats->rx_compressed = netdev->stats.rx_compressed;
|
|
|
|
stats->tx_compressed = netdev->stats.tx_compressed;
|
|
|
|
}
|
|
|
|
|
2017-10-17 14:51:30 +08:00
|
|
|
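/* Handle a mqprio request: validate the offload mode and TC count, then
* pass the prio-to-TC map down to the DCB ops.
*/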
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2017-10-17 14:51:30 +08:00
|
|
|
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
struct hnae3_knic_private_info *kinfo = &h->kinfo;
|
2017-10-17 14:51:30 +08:00
|
|
|
u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
|
|
|
|
u8 tc = mqprio_qopt->qopt.num_tc;
|
|
|
|
u16 mode = mqprio_qopt->mode;
|
|
|
|
u8 hw = mqprio_qopt->qopt.hw;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2017-10-17 14:51:30 +08:00
|
|
|
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
|
|
|
|
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
if (tc > HNAE3_MAX_TC)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-27 00:49:18 +08:00
|
|
|
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
|
2017-10-17 14:51:30 +08:00
|
|
|
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2017-08-07 16:15:17 +08:00
|
|
|
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
2017-08-07 16:15:32 +08:00
|
|
|
void *type_data)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2017-11-06 14:23:42 +08:00
|
|
|
if (type != TC_SETUP_QDISC_MQPRIO)
|
2017-08-07 16:15:31 +08:00
|
|
|
return -EOPNOTSUPP;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2017-10-17 14:51:30 +08:00
|
|
|
return hns3_setup_tc(dev, type_data);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
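/* .ndo_vlan_rx_add_vid handler: add the VLAN ID to the hardware filter and
* record it in priv->active_vlans so that it can be restored later.
*/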
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
|
|
|
|
__be16 proto, u16 vid)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2018-03-21 15:49:22 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
int ret = -EIO;
|
|
|
|
|
|
|
|
if (h->ae_algo->ops->set_vlan_filter)
|
|
|
|
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
|
|
|
|
|
2018-03-21 15:49:22 +08:00
|
|
|
if (!ret)
|
|
|
|
set_bit(vid, priv->active_vlans);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
|
|
|
|
__be16 proto, u16 vid)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2018-03-21 15:49:22 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
int ret = -EIO;
|
|
|
|
|
|
|
|
if (h->ae_algo->ops->set_vlan_filter)
|
|
|
|
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
|
|
|
|
|
2018-03-21 15:49:22 +08:00
|
|
|
if (!ret)
|
|
|
|
clear_bit(vid, priv->active_vlans);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-10-30 21:50:50 +08:00
|
|
|
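/* Re-add every VLAN ID recorded in priv->active_vlans to the hardware
* VLAN filter.
*/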
static int hns3_restore_vlan(struct net_device *netdev)
|
2018-03-21 15:49:22 +08:00
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2018-10-30 21:50:50 +08:00
|
|
|
int ret = 0;
|
2018-03-21 15:49:22 +08:00
|
|
|
u16 vid;
|
|
|
|
|
|
|
|
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
|
|
|
|
ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
|
2018-10-30 21:50:50 +08:00
|
|
|
if (ret) {
|
|
|
|
netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
|
|
|
|
vid, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
2018-03-21 15:49:22 +08:00
|
|
|
}
|
2018-10-30 21:50:50 +08:00
|
|
|
|
|
|
|
return ret;
|
2018-03-21 15:49:22 +08:00
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
|
|
|
|
u8 qos, __be16 vlan_proto)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
int ret = -EIO;
|
|
|
|
|
|
|
|
if (h->ae_algo->ops->set_vf_vlan_filter)
|
|
|
|
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
|
|
|
|
qos, vlan_proto);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-08-22 00:05:24 +08:00
|
|
|
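/* .ndo_change_mtu handler: reject the change while the device is resetting,
* otherwise program the new MTU through the ae_algo ops and update
* netdev->mtu on success.
*/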
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
|
{
|
2017-10-09 15:43:56 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
2017-08-22 00:05:24 +08:00
|
|
|
int ret;
|
|
|
|
|
2019-04-04 16:17:57 +08:00
|
|
|
if (hns3_nic_resetting(netdev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2017-08-22 00:05:24 +08:00
|
|
|
if (!h->ae_algo->ops->set_mtu)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
|
2018-09-27 02:28:37 +08:00
|
|
|
if (ret)
|
2017-08-22 00:05:24 +08:00
|
|
|
netdev_err(netdev, "failed to change MTU in hardware %d\n",
|
|
|
|
ret);
|
2018-09-27 02:28:37 +08:00
|
|
|
else
|
|
|
|
netdev->mtu = new_mtu;
|
2018-01-05 18:18:20 +08:00
|
|
|
|
2017-08-22 00:05:24 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-11-02 20:45:20 +08:00
|
|
|
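/* Find the TX queue that triggered the watchdog timeout and dump its
* software state and hardware ring registers; return false if no timed-out
* queue is found.
*/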
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(ndev);
|
2019-04-19 11:05:42 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(ndev);
|
2017-11-02 20:45:20 +08:00
|
|
|
struct hns3_enet_ring *tx_ring = NULL;
|
2019-04-19 11:05:42 +08:00
|
|
|
struct napi_struct *napi;
|
2017-11-02 20:45:20 +08:00
|
|
|
int timeout_queue = 0;
|
|
|
|
int hw_head, hw_tail;
|
2019-04-19 11:05:42 +08:00
|
|
|
int fbd_num, fbd_oft;
|
|
|
|
int ebd_num, ebd_oft;
|
|
|
|
int bd_num, bd_err;
|
|
|
|
int ring_en, tc;
|
2017-11-02 20:45:20 +08:00
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Find the stopped queue the same way the stack does */
|
2019-04-19 11:05:41 +08:00
|
|
|
for (i = 0; i < ndev->num_tx_queues; i++) {
|
2017-11-02 20:45:20 +08:00
|
|
|
struct netdev_queue *q;
|
|
|
|
unsigned long trans_start;
|
|
|
|
|
|
|
|
q = netdev_get_tx_queue(ndev, i);
|
|
|
|
trans_start = q->trans_start;
|
|
|
|
if (netif_xmit_stopped(q) &&
|
|
|
|
time_after(jiffies,
|
|
|
|
(trans_start + ndev->watchdog_timeo))) {
|
|
|
|
timeout_queue = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == ndev->num_tx_queues) {
|
|
|
|
netdev_info(ndev,
|
|
|
|
"no netdev TX timeout queue found, timeout count: %llu\n",
|
|
|
|
priv->tx_timeout_count);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-04-19 11:05:40 +08:00
|
|
|
priv->tx_timeout_count++;
|
|
|
|
|
2017-11-02 20:45:20 +08:00
|
|
|
tx_ring = priv->ring_data[timeout_queue].ring;
|
2019-04-19 11:05:42 +08:00
|
|
|
napi = &tx_ring->tqp_vector->napi;
|
|
|
|
|
|
|
|
netdev_info(ndev,
|
|
|
|
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
|
|
|
|
priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
|
|
|
|
tx_ring->next_to_clean, napi->state);
|
|
|
|
|
|
|
|
netdev_info(ndev,
|
|
|
|
"tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
|
|
|
|
tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
|
|
|
|
tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
|
|
|
|
|
|
|
|
netdev_info(ndev,
|
|
|
|
"seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
|
|
|
|
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
|
|
|
|
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
|
|
|
|
|
|
|
|
/* When the MAC receives many pause frames continuously, it is unable to send
|
|
|
|
* packets, which may cause a TX timeout
|
|
|
|
*/
|
|
|
|
if (h->ae_algo->ops->update_stats &&
|
|
|
|
h->ae_algo->ops->get_mac_pause_stats) {
|
|
|
|
u64 tx_pause_cnt, rx_pause_cnt;
|
|
|
|
|
|
|
|
h->ae_algo->ops->update_stats(h, &ndev->stats);
|
|
|
|
h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
|
|
|
|
&rx_pause_cnt);
|
|
|
|
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
|
|
|
|
tx_pause_cnt, rx_pause_cnt);
|
|
|
|
}
|
2017-11-02 20:45:20 +08:00
|
|
|
|
|
|
|
hw_head = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_HEAD_REG);
|
|
|
|
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_TAIL_REG);
|
2019-04-19 11:05:42 +08:00
|
|
|
fbd_num = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_FBDNUM_REG);
|
|
|
|
fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_OFFSET_REG);
|
|
|
|
ebd_num = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_EBDNUM_REG);
|
|
|
|
ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_EBD_OFFSET_REG);
|
|
|
|
bd_num = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_BD_NUM_REG);
|
|
|
|
bd_err = readl_relaxed(tx_ring->tqp->io_base +
|
|
|
|
HNS3_RING_TX_RING_BD_ERR_REG);
|
|
|
|
ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
|
|
|
|
tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
|
|
|
|
|
2017-11-02 20:45:20 +08:00
|
|
|
netdev_info(ndev,
|
2019-04-19 11:05:42 +08:00
|
|
|
"BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
|
|
|
|
bd_num, hw_head, hw_tail, bd_err,
|
2017-11-02 20:45:20 +08:00
|
|
|
readl(tx_ring->tqp_vector->mask_addr));
|
2019-04-19 11:05:42 +08:00
|
|
|
netdev_info(ndev,
|
|
|
|
"RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
|
|
|
|
ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
|
2017-11-02 20:45:20 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_nic_net_timeout(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct hns3_nic_priv *priv = netdev_priv(ndev);
|
|
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
|
|
|
|
|
|
if (!hns3_get_tx_timeo_queue_info(ndev))
|
|
|
|
return;
|
|
|
|
|
2018-11-07 12:06:14 +08:00
|
|
|
/* request the reset, and let the hclge determine
|
|
|
|
* which reset level should be done
|
|
|
|
*/
|
2017-11-02 20:45:20 +08:00
|
|
|
if (h->ae_algo->ops->reset_event)
|
2018-10-20 03:15:27 +08:00
|
|
|
h->ae_algo->ops->reset_event(h->pdev, h);
|
2017-11-02 20:45:20 +08:00
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static const struct net_device_ops hns3_nic_netdev_ops = {
|
|
|
|
.ndo_open = hns3_nic_net_open,
|
|
|
|
.ndo_stop = hns3_nic_net_stop,
|
|
|
|
.ndo_start_xmit = hns3_nic_net_xmit,
|
2017-11-02 20:45:20 +08:00
|
|
|
.ndo_tx_timeout = hns3_nic_net_timeout,
|
2017-08-02 23:59:45 +08:00
|
|
|
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
|
2018-09-20 01:29:51 +08:00
|
|
|
.ndo_do_ioctl = hns3_nic_do_ioctl,
|
2017-08-22 00:05:24 +08:00
|
|
|
.ndo_change_mtu = hns3_nic_change_mtu,
|
2017-08-02 23:59:45 +08:00
|
|
|
.ndo_set_features = hns3_nic_set_features,
|
|
|
|
.ndo_get_stats64 = hns3_nic_get_stats64,
|
|
|
|
.ndo_setup_tc = hns3_nic_setup_tc,
|
|
|
|
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
|
|
|
|
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
|
|
|
|
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
|
|
|
|
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
|
|
|
|
};
|
|
|
|
|
2019-04-19 11:05:47 +08:00
|
|
|
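/* Return true if the PCI device ID belongs to a physical function, false
* for VF device IDs or unrecognized IDs.
*/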
bool hns3_is_phys_func(struct pci_dev *pdev)
|
2018-05-16 02:20:05 +08:00
|
|
|
{
|
|
|
|
u32 dev_id = pdev->device;
|
|
|
|
|
|
|
|
switch (dev_id) {
|
|
|
|
case HNAE3_DEV_ID_GE:
|
|
|
|
case HNAE3_DEV_ID_25GE:
|
|
|
|
case HNAE3_DEV_ID_25GE_RDMA:
|
|
|
|
case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
|
|
|
|
case HNAE3_DEV_ID_50GE_RDMA:
|
|
|
|
case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
|
|
|
|
case HNAE3_DEV_ID_100G_RDMA_MACSEC:
|
|
|
|
return true;
|
|
|
|
case HNAE3_DEV_ID_100G_VF:
|
|
|
|
case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
|
|
|
|
return false;
|
|
|
|
default:
|
|
|
|
dev_warn(&pdev->dev, "un-recognized pci device-id %d",
|
|
|
|
dev_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_disable_sriov(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
/* If our VFs are assigned we cannot shut down SR-IOV
|
|
|
|
* without causing issues, so just leave the hardware
|
|
|
|
* available but disabled
|
|
|
|
*/
|
|
|
|
if (pci_vfs_assigned(pdev)) {
|
|
|
|
dev_warn(&pdev->dev,
|
|
|
|
"disabling driver while VFs are assigned\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_disable_sriov(pdev);
|
|
|
|
}
|
|
|
|
|
2018-10-01 19:46:41 +08:00
|
|
|
static void hns3_get_dev_capability(struct pci_dev *pdev,
|
|
|
|
struct hnae3_ae_dev *ae_dev)
|
|
|
|
{
|
2018-11-15 17:29:21 +08:00
|
|
|
if (pdev->revision >= 0x21) {
|
2018-10-01 19:46:41 +08:00
|
|
|
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
|
2018-11-15 17:29:21 +08:00
|
|
|
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
|
|
|
|
}
|
2018-10-01 19:46:41 +08:00
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* hns3_probe - Device initialization routine
|
|
|
|
* @pdev: PCI device information struct
|
|
|
|
* @ent: entry in hns3_pci_tbl
|
|
|
|
*
|
|
|
|
* hns3_probe initializes a PF identified by a pci_dev structure.
|
|
|
|
* The OS initialization, configuring of the PF private structure,
|
|
|
|
* and a hardware reset occur.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, negative on failure
|
|
|
|
*/
|
|
|
|
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!ae_dev) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ae_dev->pdev = pdev;
|
2017-09-20 18:52:50 +08:00
|
|
|
ae_dev->flag = ent->driver_data;
|
2017-08-02 23:59:45 +08:00
|
|
|
ae_dev->dev_type = HNAE3_DEV_KNIC;
|
2018-10-01 19:46:45 +08:00
|
|
|
ae_dev->reset_type = HNAE3_NONE_RESET;
|
2018-10-01 19:46:41 +08:00
|
|
|
hns3_get_dev_capability(pdev, ae_dev);
|
2017-08-02 23:59:45 +08:00
|
|
|
pci_set_drvdata(pdev, ae_dev);
|
|
|
|
|
2019-01-31 04:55:44 +08:00
|
|
|
ret = hnae3_register_ae_dev(ae_dev);
|
|
|
|
if (ret) {
|
|
|
|
devm_kfree(&pdev->dev, ae_dev);
|
|
|
|
pci_set_drvdata(pdev, NULL);
|
|
|
|
}
|
2018-05-16 02:20:05 +08:00
|
|
|
|
2019-01-31 04:55:44 +08:00
|
|
|
return ret;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* hns3_remove - Device removal routine
|
|
|
|
* @pdev: PCI device information struct
|
|
|
|
*/
|
|
|
|
static void hns3_remove(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
|
2018-05-16 02:20:05 +08:00
|
|
|
if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
|
|
|
|
hns3_disable_sriov(pdev);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
hnae3_unregister_ae_dev(ae_dev);
|
2019-01-27 00:49:11 +08:00
|
|
|
pci_set_drvdata(pdev, NULL);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2018-05-16 02:20:13 +08:00
|
|
|
/**
|
|
|
|
* hns3_pci_sriov_configure
|
|
|
|
* @pdev: pointer to a pci_dev structure
|
|
|
|
* @num_vfs: number of VFs to allocate
|
|
|
|
*
|
|
|
|
* Enable or change the number of VFs. Called when the user updates the number
|
|
|
|
* of VFs in sysfs.
|
|
|
|
**/
|
2018-05-19 23:53:15 +08:00
|
|
|
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
2018-05-16 02:20:13 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
|
|
|
|
dev_warn(&pdev->dev, "Can not config SRIOV\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (num_vfs) {
|
|
|
|
ret = pci_enable_sriov(pdev, num_vfs);
|
|
|
|
if (ret)
|
|
|
|
dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
|
2018-05-19 23:53:15 +08:00
|
|
|
else
|
|
|
|
return num_vfs;
|
2018-05-16 02:20:13 +08:00
|
|
|
} else if (!pci_vfs_assigned(pdev)) {
|
|
|
|
pci_disable_sriov(pdev);
|
|
|
|
} else {
|
|
|
|
dev_warn(&pdev->dev,
|
|
|
|
"Unable to free VFs because some are assigned to VMs.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-09-03 18:21:49 +08:00
|
|
|
static void hns3_shutdown(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
|
|
|
|
hnae3_unregister_ae_dev(ae_dev);
|
|
|
|
devm_kfree(&pdev->dev, ae_dev);
|
|
|
|
pci_set_drvdata(pdev, NULL);
|
|
|
|
|
|
|
|
if (system_state == SYSTEM_POWER_OFF)
|
|
|
|
pci_set_power_state(pdev, PCI_D3hot);
|
|
|
|
}
|
|
|
|
|
2018-10-20 03:15:26 +08:00
|
|
|
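/* PCI AER error_detected callback: hand the reported error to the ae_dev
* RAS handler and return its recovery verdict.
*/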
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
|
|
|
|
pci_channel_state_t state)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
pci_ers_result_t ret;
|
|
|
|
|
|
|
|
dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
|
|
|
|
|
|
|
|
if (state == pci_channel_io_perm_failure)
|
|
|
|
return PCI_ERS_RESULT_DISCONNECT;
|
|
|
|
|
|
|
|
if (!ae_dev) {
|
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"Can't recover - error happened during device init\n");
|
|
|
|
return PCI_ERS_RESULT_NONE;
|
|
|
|
}
|
|
|
|
|
2018-12-08 05:08:02 +08:00
|
|
|
if (ae_dev->ops->handle_hw_ras_error)
|
|
|
|
ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
|
2018-10-20 03:15:26 +08:00
|
|
|
else
|
|
|
|
return PCI_ERS_RESULT_NONE;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-10-20 03:15:27 +08:00
|
|
|
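/* PCI AER slot_reset callback: trigger a device reset through the
* reset_event op (unless override_pci_need_reset is set) and report the
* slot as recovered.
*/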
static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
struct device *dev = &pdev->dev;
|
|
|
|
|
|
|
|
dev_info(dev, "requesting reset due to PCI error\n");
|
|
|
|
|
|
|
|
/* request the reset */
|
|
|
|
if (ae_dev->ops->reset_event) {
|
2019-03-10 14:47:51 +08:00
|
|
|
if (!ae_dev->override_pci_need_reset)
|
|
|
|
ae_dev->ops->reset_event(pdev, NULL);
|
|
|
|
|
2018-10-20 03:15:27 +08:00
|
|
|
return PCI_ERS_RESULT_RECOVERED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return PCI_ERS_RESULT_DISCONNECT;
|
|
|
|
}
|
|
|
|
|
2018-11-09 22:07:54 +08:00
|
|
|
static void hns3_reset_prepare(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
|
|
|
|
dev_info(&pdev->dev, "hns3 flr prepare\n");
|
|
|
|
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
|
|
|
|
ae_dev->ops->flr_prepare(ae_dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_reset_done(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
|
|
|
|
|
|
|
dev_info(&pdev->dev, "hns3 flr done\n");
|
|
|
|
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
|
|
|
|
ae_dev->ops->flr_done(ae_dev);
|
|
|
|
}
|
|
|
|
|
2018-10-20 03:15:26 +08:00
|
|
|
static const struct pci_error_handlers hns3_err_handler = {
|
|
|
|
.error_detected = hns3_error_detected,
|
2018-10-20 03:15:27 +08:00
|
|
|
.slot_reset = hns3_slot_reset,
|
2018-11-09 22:07:54 +08:00
|
|
|
.reset_prepare = hns3_reset_prepare,
|
|
|
|
.reset_done = hns3_reset_done,
|
2018-10-20 03:15:26 +08:00
|
|
|
};
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static struct pci_driver hns3_driver = {
|
|
|
|
.name = hns3_driver_name,
|
|
|
|
.id_table = hns3_pci_tbl,
|
|
|
|
.probe = hns3_probe,
|
|
|
|
.remove = hns3_remove,
|
2018-09-03 18:21:49 +08:00
|
|
|
.shutdown = hns3_shutdown,
|
2018-05-16 02:20:13 +08:00
|
|
|
.sriov_configure = hns3_pci_sriov_configure,
|
2018-10-20 03:15:26 +08:00
|
|
|
.err_handler = &hns3_err_handler,
|
2017-08-02 23:59:45 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/* set the default features for the hns3 netdev */
|
|
|
|
static void hns3_set_default_feature(struct net_device *netdev)
|
|
|
|
{
|
2018-08-15 00:13:19 +08:00
|
|
|
struct hnae3_handle *h = hns3_get_handle(netdev);
|
|
|
|
struct pci_dev *pdev = h->pdev;
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
netdev->priv_flags |= IFF_UNICAST_FLT;
|
|
|
|
|
|
|
|
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
|
|
|
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
|
|
|
|
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
|
|
|
|
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
|
2018-09-27 02:28:31 +08:00
|
|
|
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
|
|
|
|
|
|
|
|
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
|
|
|
|
|
|
|
|
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
|
|
|
NETIF_F_HW_VLAN_CTAG_FILTER |
|
2017-12-22 12:21:47 +08:00
|
|
|
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
2017-08-02 23:59:45 +08:00
|
|
|
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
|
|
|
|
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
|
|
|
|
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
|
2018-09-27 02:28:31 +08:00
|
|
|
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
netdev->vlan_features |=
|
|
|
|
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
|
|
|
|
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
|
|
|
|
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
|
|
|
|
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
|
2018-09-27 02:28:31 +08:00
|
|
|
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
2018-05-04 00:28:11 +08:00
|
|
|
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
2017-08-02 23:59:45 +08:00
|
|
|
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
|
|
|
|
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
|
|
|
|
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
|
2018-09-27 02:28:31 +08:00
|
|
|
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
|
2018-08-15 00:13:19 +08:00
|
|
|
|
2018-10-01 19:46:47 +08:00
|
|
|
if (pdev->revision >= 0x21) {
|
2019-02-02 22:39:35 +08:00
|
|
|
netdev->hw_features |= NETIF_F_GRO_HW;
|
2018-11-15 17:29:23 +08:00
|
|
|
netdev->features |= NETIF_F_GRO_HW;
|
2018-10-01 19:46:47 +08:00
|
|
|
|
|
|
|
if (!(h->flags & HNAE3_SUPPORT_VF)) {
|
|
|
|
netdev->hw_features |= NETIF_F_NTUPLE;
|
|
|
|
netdev->features |= NETIF_F_NTUPLE;
|
|
|
|
}
|
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
|
|
|
|
struct hns3_desc_cb *cb)
|
|
|
|
{
|
2018-07-02 15:50:26 +08:00
|
|
|
unsigned int order = hnae3_page_order(ring);
|
2017-08-02 23:59:45 +08:00
|
|
|
struct page *p;
|
|
|
|
|
|
|
|
p = dev_alloc_pages(order);
|
|
|
|
if (!p)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
cb->priv = p;
|
|
|
|
cb->page_offset = 0;
|
|
|
|
cb->reuse_flag = 0;
|
|
|
|
cb->buf = page_address(p);
|
2018-07-02 15:50:26 +08:00
|
|
|
cb->length = hnae3_page_size(ring);
|
2017-08-02 23:59:45 +08:00
|
|
|
cb->type = DESC_TYPE_PAGE;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_free_buffer(struct hns3_enet_ring *ring,
|
|
|
|
struct hns3_desc_cb *cb)
|
|
|
|
{
|
|
|
|
if (cb->type == DESC_TYPE_SKB)
|
|
|
|
dev_kfree_skb_any((struct sk_buff *)cb->priv);
|
|
|
|
else if (!HNAE3_IS_TX_RING(ring))
|
|
|
|
put_page((struct page *)cb->priv);
|
|
|
|
memset(cb, 0, sizeof(*cb));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
|
|
|
|
{
|
|
|
|
cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
|
|
|
|
cb->length, ring_to_dma_dir(ring));
|
|
|
|
|
2018-09-21 23:41:44 +08:00
|
|
|
if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
|
2017-08-02 23:59:45 +08:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
|
|
|
|
struct hns3_desc_cb *cb)
|
|
|
|
{
|
|
|
|
if (cb->type == DESC_TYPE_SKB)
|
|
|
|
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
|
|
|
|
ring_to_dma_dir(ring));
|
2018-10-16 19:58:52 +08:00
|
|
|
else if (cb->length)
|
2017-08-02 23:59:45 +08:00
|
|
|
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
|
|
|
|
ring_to_dma_dir(ring));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
|
|
|
|
{
|
|
|
|
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
|
|
|
|
ring->desc[i].addr = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
|
|
|
|
{
|
|
|
|
struct hns3_desc_cb *cb = &ring->desc_cb[i];
|
|
|
|
|
|
|
|
if (!ring->desc_cb[i].dma)
|
|
|
|
return;
|
|
|
|
|
|
|
|
hns3_buffer_detach(ring, i);
|
|
|
|
hns3_free_buffer(ring, cb);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_free_buffers(struct hns3_enet_ring *ring)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ring->desc_num; i++)
|
|
|
|
hns3_free_buffer_detach(ring, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* free desc along with its attached buffer */
|
|
|
|
static void hns3_free_desc(struct hns3_enet_ring *ring)
|
|
|
|
{
|
2018-07-02 15:50:25 +08:00
|
|
|
int size = ring->desc_num * sizeof(ring->desc[0]);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
hns3_free_buffers(ring);
|
|
|
|
|
2018-07-02 15:50:25 +08:00
|
|
|
if (ring->desc) {
|
|
|
|
dma_free_coherent(ring_to_dev(ring), size,
|
|
|
|
ring->desc, ring->desc_dma_addr);
|
|
|
|
ring->desc = NULL;
|
|
|
|
}
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
|
|
|
|
{
|
|
|
|
int size = ring->desc_num * sizeof(ring->desc[0]);
|
|
|
|
|
2019-01-04 16:23:09 +08:00
|
|
|
ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
|
|
|
|
&ring->desc_dma_addr, GFP_KERNEL);
|
2017-08-02 23:59:45 +08:00
|
|
|
if (!ring->desc)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
|
|
|
|
struct hns3_desc_cb *cb)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = hns3_alloc_buffer(ring, cb);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = hns3_map_buffer(ring, cb);
|
|
|
|
if (ret)
|
|
|
|
goto out_with_buf;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_with_buf:
|
2017-10-23 19:51:02 +08:00
|
|
|
hns3_free_buffer(ring, cb);
|
2017-08-02 23:59:45 +08:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
|
|
|
|
{
|
|
|
|
int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate memory for a raw packet buffer and map it for DMA */
|
|
|
|
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
|
|
|
|
{
|
|
|
|
int i, j, ret;
|
|
|
|
|
|
|
|
for (i = 0; i < ring->desc_num; i++) {
|
|
|
|
ret = hns3_alloc_buffer_attach(ring, i);
|
|
|
|
if (ret)
|
|
|
|
goto out_buffer_fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_buffer_fail:
|
|
|
|
for (j = i - 1; j >= 0; j--)
|
|
|
|
hns3_free_buffer_detach(ring, j);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* detach an in-use buffer and replace it with a reserved one */
|
|
|
|
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
|
|
|
|
struct hns3_desc_cb *res_cb)
|
|
|
|
{
|
2017-10-23 19:51:01 +08:00
|
|
|
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
|
2017-08-02 23:59:45 +08:00
|
|
|
ring->desc_cb[i] = *res_cb;
|
|
|
|
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
|
2018-05-26 02:43:02 +08:00
|
|
|
ring->desc[i].rx.bd_base_info = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
|
|
|
|
{
|
|
|
|
ring->desc_cb[i].reuse_flag = 0;
|
|
|
|
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
|
|
|
|
+ ring->desc_cb[i].page_offset);
|
2018-05-26 02:43:02 +08:00
|
|
|
ring->desc[i].rx.bd_base_info = 0;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:49 +08:00
|
|
|
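/* Walk the TX ring from next_to_clean up to the hardware head, accumulating
* the completed bytes/packets and freeing the attached buffers, then publish
* the new next_to_clean with release semantics.
*/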
static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
|
|
|
|
int *bytes, int *pkts)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
2019-04-25 20:42:45 +08:00
|
|
|
int ntc = ring->next_to_clean;
|
|
|
|
struct hns3_desc_cb *desc_cb;
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:49 +08:00
|
|
|
while (head != ntc) {
|
|
|
|
desc_cb = &ring->desc_cb[ntc];
|
|
|
|
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
|
|
|
|
(*bytes) += desc_cb->length;
|
|
|
|
/* desc_cb will be cleaned after hns3_free_buffer_detach */
|
|
|
|
hns3_free_buffer_detach(ring, ntc);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:49 +08:00
|
|
|
if (++ntc == ring->desc_num)
|
|
|
|
ntc = 0;
|
|
|
|
|
|
|
|
/* Issue prefetch for next Tx descriptor */
|
|
|
|
prefetch(&ring->desc_cb[ntc]);
|
|
|
|
}
|
2019-04-25 20:42:45 +08:00
|
|
|
|
|
|
|
/* This smp_store_release() pairs with smp_load_acquire() in
|
|
|
|
* ring_space called by hns3_nic_net_xmit.
|
|
|
|
*/
|
|
|
|
smp_store_release(&ring->next_to_clean, ntc);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
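/* Check that the hardware head lies between next_to_clean and next_to_use,
* taking ring wrap-around into account.
*/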
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
|
|
|
|
{
|
|
|
|
int u = ring->next_to_use;
|
|
|
|
int c = ring->next_to_clean;
|
|
|
|
|
|
|
|
if (unlikely(h > ring->desc_num))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return u > c ? (h > c && h <= u) : (h > c || h <= u);
|
|
|
|
}
|
|
|
|
|
2018-09-20 01:29:49 +08:00
|
|
|
void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
|
2018-09-21 23:41:43 +08:00
|
|
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
2017-08-02 23:59:45 +08:00
|
|
|
struct netdev_queue *dev_queue;
|
|
|
|
int bytes, pkts;
|
|
|
|
int head;
|
|
|
|
|
|
|
|
head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
|
|
|
|
rmb(); /* Make sure head is ready before touching any data */
|
|
|
|
|
|
|
|
if (is_ring_empty(ring) || head == ring->next_to_clean)
|
2018-09-20 01:29:49 +08:00
|
|
|
return; /* no data to poll */
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2018-06-28 12:12:23 +08:00
|
|
|
if (unlikely(!is_valid_clean_head(ring, head))) {
|
2017-08-02 23:59:45 +08:00
|
|
|
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
|
|
|
|
ring->next_to_use, ring->next_to_clean);
|
|
|
|
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.io_err_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
2018-09-20 01:29:49 +08:00
|
|
|
return;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bytes = 0;
|
|
|
|
pkts = 0;
|
2019-05-06 10:48:49 +08:00
|
|
|
hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
ring->tqp_vector->tx_group.total_bytes += bytes;
|
|
|
|
ring->tqp_vector->tx_group.total_packets += pkts;
|
|
|
|
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.tx_bytes += bytes;
|
|
|
|
ring->stats.tx_pkts += pkts;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
|
|
|
dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
|
|
|
|
netdev_tx_completed_queue(dev_queue, pkts, bytes);
|
|
|
|
|
|
|
|
if (unlikely(pkts && netif_carrier_ok(netdev) &&
|
|
|
|
(ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
|
|
|
|
/* Make sure that anybody stopping the queue after this
|
|
|
|
* sees the new next_to_clean.
|
|
|
|
*/
|
|
|
|
smp_mb();
|
2018-09-21 23:41:43 +08:00
|
|
|
if (netif_tx_queue_stopped(dev_queue) &&
|
|
|
|
!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
|
2017-08-02 23:59:45 +08:00
|
|
|
netif_tx_wake_queue(dev_queue);
|
|
|
|
ring->stats.restart_queue++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
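/* Return the number of RX descriptors that are free to be refilled, i.e.
* the distance from next_to_use back to next_to_clean.
*/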
static int hns3_desc_unused(struct hns3_enet_ring *ring)
|
|
|
|
{
|
|
|
|
int ntc = ring->next_to_clean;
|
|
|
|
int ntu = ring->next_to_use;
|
|
|
|
|
|
|
|
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
|
|
|
|
{
|
|
|
|
struct hns3_desc_cb *desc_cb;
|
|
|
|
struct hns3_desc_cb res_cbs;
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
for (i = 0; i < cleaned_count; i++) {
|
|
|
|
desc_cb = &ring->desc_cb[ring->next_to_use];
|
|
|
|
if (desc_cb->reuse_flag) {
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.reuse_pg_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
|
|
|
hns3_reuse_buffer(ring, ring->next_to_use);
|
|
|
|
} else {
|
|
|
|
ret = hns3_reserve_buffer_map(ring, &res_cbs);
|
|
|
|
if (ret) {
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.sw_err_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
|
|
|
netdev_err(ring->tqp->handle->kinfo.netdev,
|
|
|
|
"hnae reserve buffer map failed.\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
|
2019-05-06 10:48:43 +08:00
|
|
|
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.non_reuse_pg++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ring_ptr_move_fw(ring, next_to_use);
|
|
|
|
}
|
|
|
|
|
|
|
|
wmb(); /* Make sure all data has been written before submitting */
|
|
|
|
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
|
|
|
|
}
|
|
|
|
|
|
|
|
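/* Attach the RX buffer page to the skb as a frag and decide whether the
* page can be reused for another descriptor: advance page_offset and bump
* the page refcount when reuse is possible.
*/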
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
|
|
|
|
struct hns3_enet_ring *ring, int pull_len,
|
|
|
|
struct hns3_desc_cb *desc_cb)
|
|
|
|
{
|
2019-05-06 10:48:50 +08:00
|
|
|
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
|
|
|
|
int size = le16_to_cpu(desc->rx.size);
|
|
|
|
u32 truesize = hnae3_buf_size(ring);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
|
|
|
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
|
2018-03-10 11:29:26 +08:00
|
|
|
size - pull_len, truesize);
|
2017-08-02 23:59:45 +08:00
|
|
|
|
2019-05-06 10:48:50 +08:00
|
|
|
/* Avoid re-using remote pages, or pages that the stack is still using
|
|
|
|
* when page_offset rolls back to zero; the default is not to reuse
|
|
|
|
*/
|
|
|
|
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
|
|
|
|
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
|
2017-08-02 23:59:45 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Move offset up to the next cache line */
|
|
|
|
desc_cb->page_offset += truesize;
|
|
|
|
|
2019-05-06 10:48:50 +08:00
|
|
|
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
|
2017-08-02 23:59:45 +08:00
|
|
|
desc_cb->reuse_flag = 1;
|
|
|
|
/* Bump ref count on page before it is given */
|
|
|
|
get_page(desc_cb->priv);
|
2019-05-06 10:48:50 +08:00
|
|
|
} else if (page_count(desc_cb->priv) == 1) {
|
|
|
|
desc_cb->reuse_flag = 1;
|
|
|
|
desc_cb->page_offset = 0;
|
|
|
|
get_page(desc_cb->priv);
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-14 09:47:40 +08:00
|
|
|
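/* Finish a hardware-coalesced GRO packet: skip any VLAN headers, verify the
* L3 protocol, set gso_segs from the GRO metadata and mark the checksum as
* already verified.
*/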
static int hns3_gro_complete(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
__be16 type = skb->protocol;
|
|
|
|
struct tcphdr *th;
|
|
|
|
int depth = 0;
|
|
|
|
|
|
|
|
while (type == htons(ETH_P_8021Q)) {
|
|
|
|
struct vlan_hdr *vh;
|
|
|
|
|
|
|
|
if ((depth + VLAN_HLEN) > skb_headlen(skb))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
vh = (struct vlan_hdr *)(skb->data + depth);
|
|
|
|
type = vh->h_vlan_encapsulated_proto;
|
|
|
|
depth += VLAN_HLEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type == htons(ETH_P_IP)) {
|
|
|
|
depth += sizeof(struct iphdr);
|
|
|
|
} else if (type == htons(ETH_P_IPV6)) {
|
|
|
|
depth += sizeof(struct ipv6hdr);
|
|
|
|
} else {
|
|
|
|
netdev_err(skb->dev,
|
|
|
|
"Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
|
|
|
|
be16_to_cpu(type), depth);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
th = (struct tcphdr *)(skb->data + depth);
|
|
|
|
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
|
|
|
|
if (th->cwr)
|
|
|
|
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
|
|
|
|
|
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
|
2019-05-06 10:48:45 +08:00
|
|
|
u32 l234info, u32 bd_base_info, u32 ol_info)
|
2017-08-02 23:59:45 +08:00
|
|
|
{
|
|
|
|
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
|
|
|
|
int l3_type, l4_type;
|
|
|
|
int ol4_type;
|
|
|
|
|
|
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
|
|
|
|
|
|
skb_checksum_none_assert(skb);
|
|
|
|
|
|
|
|
if (!(netdev->features & NETIF_F_RXCSUM))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* check if hardware has done checksum */
|
2019-02-23 17:22:14 +08:00
|
|
|
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
|
2017-08-02 23:59:45 +08:00
|
|
|
return;
|
|
|
|
|
2019-03-06 16:12:34 +08:00
|
|
|
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
|
|
|
|
BIT(HNS3_RXD_OL3E_B) |
|
2019-02-23 17:22:14 +08:00
|
|
|
BIT(HNS3_RXD_OL4E_B)))) {
|
2017-08-02 23:59:45 +08:00
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.l3l4_csum_err++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-05-06 10:48:45 +08:00
|
|
|
ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
|
2018-07-02 15:50:26 +08:00
|
|
|
HNS3_RXD_OL4ID_S);
|
2017-08-02 23:59:45 +08:00
|
|
|
switch (ol4_type) {
|
|
|
|
case HNS3_OL4_TYPE_MAC_IN_UDP:
|
|
|
|
case HNS3_OL4_TYPE_NVGRE:
|
|
|
|
skb->csum_level = 1;
|
2018-08-08 07:18:30 +08:00
|
|
|
/* fall through */
|
2017-08-02 23:59:45 +08:00
|
|
|
case HNS3_OL4_TYPE_NO_TUN:
|
2019-02-23 17:22:10 +08:00
|
|
|
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
|
|
|
|
HNS3_RXD_L3ID_S);
|
|
|
|
l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
|
|
|
|
HNS3_RXD_L4ID_S);
|
|
|
|
|
2017-08-02 23:59:45 +08:00
|
|
|
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
|
2018-06-28 12:12:22 +08:00
|
|
|
if ((l3_type == HNS3_L3_TYPE_IPV4 ||
|
|
|
|
l3_type == HNS3_L3_TYPE_IPV6) &&
|
|
|
|
(l4_type == HNS3_L4_TYPE_UDP ||
|
|
|
|
l4_type == HNS3_L4_TYPE_TCP ||
|
|
|
|
l4_type == HNS3_L4_TYPE_SCTP))
|
2017-08-02 23:59:45 +08:00
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
break;
|
2018-09-21 23:41:42 +08:00
|
|
|
default:
|
|
|
|
break;
|
2017-08-02 23:59:45 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-20 10:19:21 +08:00
|
|
|
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
|
|
|
|
{
|
2018-11-15 17:29:24 +08:00
|
|
|
if (skb_has_frag_list(skb))
|
|
|
|
napi_gro_flush(&ring->tqp_vector->napi, false);
|
|
|
|
|
2017-10-20 10:19:21 +08:00
|
|
|
napi_gro_receive(&ring->tqp_vector->napi, skb);
|
|
|
|
}
|
|
|
|
|
2018-10-06 01:03:29 +08:00
|
|
|
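/* Pick which stripped VLAN tag (outer or inner) should be reported to the
* stack, taking the port based VLAN state into account; return false when
* no tag should be reported.
*/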
static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
|
|
|
|
struct hns3_desc *desc, u32 l234info,
|
|
|
|
u16 *vlan_tag)
|
2018-05-26 02:42:58 +08:00
|
|
|
{
|
2019-04-14 09:47:36 +08:00
|
|
|
struct hnae3_handle *handle = ring->tqp->handle;
|
2018-05-26 02:42:58 +08:00
|
|
|
struct pci_dev *pdev = ring->tqp->handle->pdev;
|
|
|
|
|
|
|
|
if (pdev->revision == 0x20) {
|
2018-10-06 01:03:29 +08:00
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
|
|
|
|
if (!(*vlan_tag & VLAN_VID_MASK))
|
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
|
2018-05-26 02:42:58 +08:00
|
|
|
|
2018-10-06 01:03:29 +08:00
|
|
|
return (*vlan_tag != 0);
|
2018-05-26 02:42:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define HNS3_STRP_OUTER_VLAN 0x1
|
|
|
|
#define HNS3_STRP_INNER_VLAN 0x2
|
2019-04-14 09:47:36 +08:00
|
|
|
#define HNS3_STRP_BOTH 0x3
|
2018-05-26 02:42:58 +08:00
|
|
|
|
2019-04-14 09:47:36 +08:00
|
|
|
/* Hardware always inserts the VLAN tag into the RX descriptor when
|
|
|
|
* it removes the tag from the packet, so the driver needs to determine
|
|
|
|
* which tag to report to the stack.
|
|
|
|
*/
|
2018-07-02 15:50:26 +08:00
|
|
|
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
|
|
|
|
HNS3_RXD_STRP_TAGP_S)) {
|
2018-05-26 02:42:58 +08:00
|
|
|
case HNS3_STRP_OUTER_VLAN:
|
2019-04-14 09:47:36 +08:00
|
|
|
if (handle->port_base_vlan_state !=
|
|
|
|
HNAE3_PORT_BASE_VLAN_DISABLE)
|
|
|
|
return false;
|
|
|
|
|
2018-10-06 01:03:29 +08:00
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
|
|
|
|
return true;
|
2018-05-26 02:42:58 +08:00
|
|
|
case HNS3_STRP_INNER_VLAN:
|
2019-04-14 09:47:36 +08:00
|
|
|
if (handle->port_base_vlan_state !=
|
|
|
|
HNAE3_PORT_BASE_VLAN_DISABLE)
|
|
|
|
return false;
|
|
|
|
|
2018-10-06 01:03:29 +08:00
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
|
2019-04-14 09:47:36 +08:00
|
|
|
return true;
|
|
|
|
case HNS3_STRP_BOTH:
|
|
|
|
if (handle->port_base_vlan_state ==
|
|
|
|
HNAE3_PORT_BASE_VLAN_DISABLE)
|
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
|
|
|
|
else
|
|
|
|
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
|
|
|
|
|
2018-10-06 01:03:29 +08:00
|
|
|
return true;
|
2018-05-26 02:42:58 +08:00
|
|
|
default:
|
2018-10-06 01:03:29 +08:00
|
|
|
return false;
|
2018-05-26 02:42:58 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-15 17:29:22 +08:00
|
|
|
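/* Allocate an skb for the current RX BD: copy small packets into the linear
* area and try to reuse the page, otherwise pull only the header and attach
* the rest as a frag, returning HNS3_NEED_ADD_FRAG when more BDs follow.
*/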
static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
|
|
|
|
unsigned char *va)
|
|
|
|
{
|
|
|
|
#define HNS3_NEED_ADD_FRAG 1
|
|
|
|
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
|
|
|
|
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
|
|
|
|
skb = ring->skb;
|
|
|
|
if (unlikely(!skb)) {
|
|
|
|
netdev_err(netdev, "alloc rx skb fail\n");
|
|
|
|
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.sw_err_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
prefetchw(skb->data);
|
|
|
|
|
|
|
|
ring->pending_buf = 1;
|
2018-11-15 17:29:24 +08:00
|
|
|
ring->frag_num = 0;
|
|
|
|
ring->tail_skb = NULL;
|
2018-11-15 17:29:22 +08:00
|
|
|
if (length <= HNS3_RX_HEAD_SIZE) {
|
|
|
|
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
|
|
|
|
|
|
|
|
/* We can reuse buffer as-is, just make sure it is local */
|
|
|
|
if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
|
|
|
|
desc_cb->reuse_flag = 1;
|
|
|
|
else /* This page cannot be reused so discard it */
|
|
|
|
put_page(desc_cb->priv);
|
|
|
|
|
|
|
|
ring_ptr_move_fw(ring, next_to_clean);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
u64_stats_update_begin(&ring->syncp);
|
|
|
|
ring->stats.seg_pkt_cnt++;
|
|
|
|
u64_stats_update_end(&ring->syncp);
|
|
|
|
|
2019-04-22 23:55:48 +08:00
|
|
|
ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
|
2018-11-15 17:29:22 +08:00
|
|
|
__skb_put(skb, ring->pull_len);
|
2018-11-15 17:29:24 +08:00
|
|
|
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
|
2018-11-15 17:29:22 +08:00
|
|
|
desc_cb);
|
|
|
|
ring_ptr_move_fw(ring, next_to_clean);
|
|
|
|
|
|
|
|
return HNS3_NEED_ADD_FRAG;
|
|
|
|
}
|
|
|
|
|
|
|
|
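/* Consume the remaining BDs of a multi-BD packet, attaching each buffer as
* a frag (switching to a frag_list skb once MAX_SKB_FRAGS is reached);
* return -ENXIO when a descriptor is not ready yet.
*/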
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
|
|
|
|
struct sk_buff **out_skb, bool pending)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb = *out_skb;
|
2018-11-15 17:29:24 +08:00
|
|
|
struct sk_buff *head_skb = *out_skb;
|
|
|
|
struct sk_buff *new_skb;
|
2018-11-15 17:29:22 +08:00
|
|
|
struct hns3_desc_cb *desc_cb;
|
|
|
|
struct hns3_desc *pre_desc;
|
|
|
|
u32 bd_base_info;
|
|
|
|
int pre_bd;
|
|
|
|
|
|
|
|
/* if there is a pending BD, the SW param next_to_clean has moved
|
|
|
|
* to the next BD and the next one is NULL
|
|
|
|
*/
|
|
|
|
if (pending) {
|
|
|
|
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
|
|
|
|
ring->desc_num;
|
|
|
|
pre_desc = &ring->desc[pre_bd];
|
|
|
|
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
|
|
|
|
} else {
|
|
|
|
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
|
|
|
|
}
|
|
|
|
|
2019-02-23 17:22:14 +08:00
|
|
|
while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
|
2018-11-15 17:29:22 +08:00
|
|
|
desc = &ring->desc[ring->next_to_clean];
|
|
|
|
desc_cb = &ring->desc_cb[ring->next_to_clean];
|
|
|
|
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
|
2019-03-06 11:26:37 +08:00
|
|
|
/* make sure the HW has finished writing the desc */
|
|
|
|
dma_rmb();
|
2019-02-23 17:22:14 +08:00
|
|
|
if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
|
2018-11-15 17:29:22 +08:00
|
|
|
return -ENXIO;
|
|
|
|
|
2018-11-15 17:29:24 +08:00
|
|
|
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
|
|
|
|
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
|
|
|
|
HNS3_RX_HEAD_SIZE);
|
|
|
|
if (unlikely(!new_skb)) {
|
|
|
|
netdev_err(ring->tqp->handle->kinfo.netdev,
|
|
|
|
"alloc rx skb frag fail\n");
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
ring->frag_num = 0;
|
|
|
|
|
|
|
|
if (ring->tail_skb) {
|
|
|
|
ring->tail_skb->next = new_skb;
|
|
|
|
ring->tail_skb = new_skb;
|
|
|
|
} else {
|
|
|
|
skb_shinfo(skb)->frag_list = new_skb;
|
|
|
|
ring->tail_skb = new_skb;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ring->tail_skb) {
|
|
|
|
head_skb->truesize += hnae3_buf_size(ring);
|
|
|
|
head_skb->data_len += le16_to_cpu(desc->rx.size);
|
|
|
|
head_skb->len += le16_to_cpu(desc->rx.size);
|
|
|
|
skb = ring->tail_skb;
|
|
|
|
}
|
|
|
|
|
|
|
|
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
|
2018-11-15 17:29:22 +08:00
|
|
|
ring_ptr_move_fw(ring, next_to_clean);
|
|
|
|
ring->pending_buf++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-04-14 09:47:40 +08:00
|
|
|
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info, u32 ol_info)
{
	u16 gro_count;
	u32 l3_type;

	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
				    HNS3_RXD_GRO_COUNT_S);
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
		return 0;
	}

	NAPI_GRO_CB(skb)->count = gro_count;

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return -EFAULT;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);

	return hns3_gro_complete(skb);
}

static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 rss_hash)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	enum pkt_hash_types rss_type;

	if (rss_hash)
		rss_type = handle->kinfo.rss_type;
	else
		rss_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, rss_hash, rss_type);
}

static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	enum hns3_pkt_l2t_type l2_frame_type;
	u32 bd_base_info, l234info, ol_info;
	struct hns3_desc *desc;
	unsigned int len;
	int pre_ntc, ret;

	/* bdinfo handled below is only valid on the last BD of the
	 * current packet, and ring->next_to_clean indicates the first
	 * descriptor of next packet, so need - 1 below.
	 */
	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
			(ring->desc_num - 1);
	desc = &ring->desc[pre_ntc];
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);
	ol_info = le32_to_cpu(desc->rx.ol_info);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		return -EINVAL;
	}

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		return -EFAULT;
	}

	len = skb->len;

	/* Do update ip stack process */
	skb->protocol = eth_type_trans(skb, netdev);

	/* This is needed in order to enable forwarding support */
	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
					bd_base_info, ol_info);
	if (unlikely(ret)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.rx_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return ret;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += len;

	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += len;

	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
	return 0;
}

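/* Annotation (worked example, not part of the original source): the pre_ntc
 * computation above is just one backwards step on a circular ring.  With
 * desc_num = 1024:
 *
 *	next_to_clean = 5  ->  pre_ntc = 4
 *	next_to_clean = 0  ->  pre_ntc = 1023   (wraps to the last BD)
 *
 * which is equivalent to (next_to_clean - 1 + desc_num) % desc_num, the form
 * used for pre_bd in hns3_add_frag() above.
 */
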
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb)
{
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;
	int length;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.size);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Check valid BD */
	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
		return -ENXIO;

	if (!skb)
		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * actual we can have greater size of caches with 128B Level 1 cache
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(ring->va);
#if L1_CACHE_BYTES < 128
	prefetch(ring->va + L1_CACHE_BYTES);
#endif

	if (!skb) {
		ret = hns3_alloc_skb(ring, length, ring->va);
		*out_skb = skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (ret > 0) { /* need add frag */
			ret = hns3_add_frag(ring, desc, &skb, false);
			if (ret)
				return ret;

			/* As the head data may be changed when GRO enable, copy
			 * the head data in after other data rx completed
			 */
			memcpy(skb->data, ring->va,
			       ALIGN(ring->pull_len, sizeof(long)));
		}
	} else {
		ret = hns3_add_frag(ring, desc, &skb, true);
		if (ret)
			return ret;

		/* As the head data may be changed when GRO enable, copy
		 * the head data in after other data rx completed
		 */
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));
	}

	ret = hns3_handle_bdinfo(ring, skb);
	if (unlikely(ret)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	*out_skb = skb;

	return 0;
}

int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = ring->skb;
	int num;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;
	unused_count -= ring->pending_buf;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring) -
					ring->pending_buf;
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		if (err == -ENXIO) { /* Do not get FE for the packet */
			goto out;
		} else if (unlikely(err)) {  /* Do jump the err */
			recv_bds += ring->pending_buf;
			clean_count += ring->pending_buf;
			ring->skb = NULL;
			ring->pending_buf = 0;
			continue;
		}

		rx_fn(ring, skb);
		recv_bds += ring->pending_buf;
		clean_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;

		recv_pkts++;
	}

out:
	/* Make all data has been write before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}

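/* Annotation (illustrative, based only on the code above): RX buffer refill
 * is batched.  clean_count tracks BDs consumed in this poll and unused_count
 * tracks BDs that were already free; once their sum reaches
 * RCB_NOF_ALLOC_RX_BUFF_ONCE (16) the ring is replenished in one call, e.g.
 *
 *	consumed 10 BDs, 7 already unused  ->  17 >= 16,
 *	hns3_nic_alloc_rx_buffers(ring, 17); clean_count = 0;
 *
 * and any remainder is flushed at the "out:" label before recv_pkts is
 * returned to the NAPI poll loop.
 */
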
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttlerate management
	 * 0-10MB/s    lower   (50000 ints/s)
	 * 10-20MB/s   middle  (20000 ints/s)
	 * 20-1249MB/s high    (18000 ints/s)
	 * > 40000pps  ultra   (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}
	return false;
}

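/* Annotation (worked example, not driver code): with the thresholds above, a
 * ring group that averaged 15000 bytes/ms while at HNS3_FLOW_LOW moves to
 * HNS3_FLOW_MID and the GL value becomes HNS3_INT_GL_20K (~20000 ints/s); if
 * the RX group additionally exceeds 40 packets/ms it is promoted to
 * HNS3_FLOW_ULTRA and HNS3_INT_GL_8K is chosen instead.  The function only
 * returns true (so the caller reprograms the hardware) when the GL value
 * actually changed.
 */
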
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	/* update param every 1000ms */
	if (time_before(jiffies,
			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
		return;

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
}

static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget = budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring);

	/* make sure rx ring budget not smaller than 1 */
	if (tqp_vector->num_tqps > 1)
		rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_new_int_gl(tqp_vector);
		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}

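/* Annotation (illustrative): the NAPI budget is split across the RX rings of
 * this vector, but never below 1.  For example, with budget = 64 and
 * num_tqps = 4 each ring is polled with rx_budget = 16; with num_tqps = 128
 * the max() keeps rx_budget at 1.  Returning the full budget instead of
 * completing NAPI whenever any ring used up its share keeps the vector in
 * polling mode until the rings are drained.
 */
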
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				goto err_free_chain;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			goto err_free_chain;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;

err_free_chain:
	cur_chain = head->next;
	while (cur_chain) {
		chain = cur_chain->next;
		devm_kfree(&pdev->dev, cur_chain);
		cur_chain = chain;
	}
	head->next = NULL;

	return -ENOMEM;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;

	group->count++;
}

static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}

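/* Annotation (illustrative): cpumask_local_spread(i, node) picks the i-th
 * online CPU, preferring CPUs on the device's NUMA node.  On a two-node
 * system with the NIC on node 0, the vectors' affinity masks are therefore
 * filled with node-0 CPUs first and only spill over to node 1 once those are
 * exhausted.
 */
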
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto map_ring_fail;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}

static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
#define HNS3_VECTOR_PF_MAX_NUM		64

	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

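/* Annotation (worked example): the number of requested vectors is clamped
 * twice before asking the AE layer, e.g. with 128 TQPs on a 96-CPU system:
 *
 *	vector_num = min(96, 128) = 96
 *	vector_num = min(96, HNS3_VECTOR_PF_MAX_NUM) = 64
 *
 * and ops->get_vector() may lower it further; several TQPs then share one
 * vector via the i % priv->vector_num mapping in hns3_nic_init_vector_data().
 */
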
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
			continue;

		hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);

		h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
			irq_set_affinity_notifier(tqp_vector->vector_irq,
						  NULL);
			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
			free_irq(tqp_vector->vector_irq, tqp_vector);
			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		}

		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}
}

static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}

static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;
	int desc_num;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret) {
		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
		return ret;
	}

	return 0;
}

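/* Annotation (layout sketch, inferred from the indexing above): ring_data is
 * one flat array holding TX rings first and RX rings second:
 *
 *	ring_data[0 .. num_tqps - 1]              TX ring of TQP i
 *	ring_data[num_tqps .. 2 * num_tqps - 1]   RX ring of TQP i
 *
 * which is why hns3_get_ring_config() below allocates num_tqps * 2 entries.
 */
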
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev,
				       array3_size(h->kinfo.num_tqps,
						   sizeof(*priv->ring_data),
						   2),
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	while (i--) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	devm_kfree(&pdev->dev, priv->ring_data);
	priv->ring_data = NULL;
	return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	if (!priv->ring_data)
		return;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
	priv->ring_data = NULL;
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->pending_buf = 0;
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	}
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

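/* Annotation (assumption-flagged explanation, not driver text): the descriptor
 * base address is programmed as two 32-bit registers.  Writing the high half
 * as (u32)((dma >> 31) >> 1) rather than (dma >> 32) avoids an undefined
 * 32-bit shift when dma_addr_t is only 32 bits wide; for a 64-bit address such
 * as 0x0000001234abcd00 the two writes are 0x34abcd00 (low) and 0x00000012
 * (high).
 */
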
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
		int j;

		if (!tc_info->enable)
			continue;

		for (j = 0; j < tc_info->tqp_count; j++) {
			struct hnae3_queue *q;

			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
				       tc_info->tc);
		}
	}
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	return 0;
}

/* Set mac addr if it is configured. or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}

static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static int hns3_restore_fd_rules(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->restore_fd_rules)
		ret = h->ae_algo->ops->restore_fd_rules(h);

	return ret;
}

static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}

static void hns3_info_show(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;

	dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
	dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
	dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
	dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
	dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
	dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
	dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
	dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
	dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	hns3_dbg_init(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
	netdev->max_mtu = HNS3_MAX_MTU;

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	if (netif_msg_drv(handle))
		hns3_info_show(priv);

	return ret;

out_client_start:
	unregister_netdev(netdev);
out_reg_netdev_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring_data:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

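/* Undo hns3_client_init(): drop the hardware MAC/MC address entries,
 * unregister the netdev, stop the client, then release vectors, rings
 * and debugfs state before freeing the netdev itself.
 */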
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_remove_hw_addr(netdev);

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_client_stop(handle);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_del_all_fd_rules(netdev, true);

	hns3_force_clear_all_rx_ring(handle);

	hns3_uninit_phy(netdev);

	hns3_nic_uninit_vector_data(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	hns3_dbg_uninit(handle);

out_netdev_free:
	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	return hns3_nic_set_real_num_queue(ndev);
}

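/* Re-sync the unicast and multicast address lists that the kernel still
 * holds for this netdev back into hardware; used on the reset-restore
 * path, see hns3_reset_notify_restore_enet() below.
 */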
static int hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;
	int ret = 0;

	netif_addr_lock_bh(ndev);
	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_uc_sync(ndev, ha->addr);
		if (ret)
			goto out;
	}

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_mc_sync(ndev, ha->addr);
		if (ret)
			goto out;
	}

out:
	netif_addr_unlock_bh(ndev);
	return ret;
}

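/* Unsync the device MAC address plus every unicast and multicast entry
 * from hardware; called before the netdev is torn down and when a
 * function reset is in progress.
 */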
static void hns3_remove_hw_addr(struct net_device *netdev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	hns3_nic_uc_unsync(netdev, netdev->dev_addr);

	netif_addr_lock_bh(netdev);
	/* go through and unsync uc_addr entries to the device */
	list = &netdev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_unsync(netdev, ha->addr);

	/* go through and unsync mc_addr entries to the device */
	list = &netdev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		if (ha->refcount > 1)
			hns3_nic_mc_unsync(netdev, ha->addr);

	netif_addr_unlock_bh(netdev);
}

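/* Drop every descriptor still queued on a TX ring by detaching its buffer
 * and advancing next_to_clean until it catches up with next_to_use.
 */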
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}

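/* Walk the RX ring and make every descriptor owned by the driver again,
 * replacing buffers the stack has taken and freeing any skb that is
 * still pending reassembly.
 */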
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if allocating a new buffer fails, exit
				 * directly and re-clear in the up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

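/* Like hns3_clear_rx_ring(), but used on the uninit/teardown path:
 * buffers are only unmapped, never replaced, so no new allocation is
 * attempted here.
 */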
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

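/* priv->ring_data holds the TX rings at indexes [0, num_tqps) and the
 * RX rings at indexes [num_tqps, 2 * num_tqps), which is why the RX ring
 * of queue i is looked up as ring_data[i + num_tqps] below.
 */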
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}

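/* Reset every TX and RX queue pair: ask the hardware to reset the queue,
 * reinitialize the ring registers, discard stale descriptors and hand
 * every RX buffer back to hardware before traffic is restarted.
 */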
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear the tx ring here because the self test
		 * will use the ring and does not run the down flow before up.
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all
		 * descriptors.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

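/* hns3_store_coal()/hns3_restore_coal() preserve the interrupt coalesce
 * settings across the vector teardown and re-allocation done in the
 * reset uninit/init path below.
 */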
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}

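/* The hns3_reset_notify_*_enet() handlers below implement the client side
 * of reset notifications dispatched through hns3_reset_notify(). As an
 * example of the expected ordering, hns3_set_channels() further down uses
 * DOWN -> UNINIT -> INIT -> UP to rebuild the rings with a new queue count.
 */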
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	/* It is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space, so for a function reset software
	 * intervention is required to delete the entries.
	 */
	if (hns3_dev_ongoing_func_reset(ae_dev)) {
		hns3_remove_hw_addr(ndev);
		hns3_del_all_fd_rules(ndev, false);
	}

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}

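/* Rebuild the ring and vector resources torn down by
 * hns3_reset_notify_uninit_enet(), restoring the saved coalesce settings
 * and restarting the client before marking the nic as initialized again.
 */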
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_uninit_ring;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_uninit_ring:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}

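/* Replay the software configuration the hardware lost during reset:
 * MAC address, unicast/multicast lists, promiscuous mode, VLAN filter
 * state and flow director rules.
 */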
static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	bool vlan_filter_enable;
	int ret;

	ret = hns3_init_mac_addr(netdev, false);
	if (ret)
		return ret;

	ret = hns3_recover_hw_addr(netdev);
	if (ret)
		return ret;

	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
	if (ret)
		return ret;

	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
	hns3_enable_vlan_filter(netdev, vlan_filter_enable);

	/* The hardware VLAN table is only cleared when the PF resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
		ret = hns3_restore_vlan(netdev);
		if (ret)
			return ret;
	}

	return hns3_restore_fd_rules(netdev);
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_force_clear_all_rx_ring(handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_store_coal(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	return ret;
}

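/* Dispatch a reset notification from the hnae3 core to the matching
 * client handler; unknown notification types are silently ignored.
 */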
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	case HNAE3_RESTORE_CLIENT:
		ret = hns3_reset_notify_restore_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

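/* ethtool -L handler: change the number of combined queue pairs by taking
 * the rings down through the reset-notify path, asking the ae_dev to
 * reconfigure the TQPs, and bringing everything back up. If the new queue
 * count cannot be applied, the old one is restored.
 */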
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %d",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
						    rxfh_configured);
		if (ret) {
			/* If reverting to the old tqp number also fails, a
			 * fatal error has occurred.
			 */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

	ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. It registers the hnae3 client, the debugfs entries and the
 * driver with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);