Merge branch 'hns3-next'
Huazhong Tan says:

====================
hns3: provide new interfaces & bugfixes & code optimization

This patchset provides some reset interfaces for RAS & RoCE, also some
bugfixes and optimization related to reset.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6a02d1fa03
@@ -162,6 +162,7 @@ struct hnae3_client_ops {
 	int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
 	int (*reset_notify)(struct hnae3_handle *handle,
 			    enum hnae3_reset_notify_type type);
+	enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
 };
 
 #define HNAE3_CLIENT_NAME_LENGTH 16
@@ -403,6 +404,8 @@ struct hnae3_ae_ops {
 			       u16 vlan, u8 qos, __be16 proto);
 	int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
 	void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+	void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+					  enum hnae3_reset_type rst_type);
 	void (*get_channels)(struct hnae3_handle *handle,
 			     struct ethtool_channels *ch);
 	void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -430,6 +433,9 @@ struct hnae3_ae_ops {
 	int (*restore_fd_rules)(struct hnae3_handle *handle);
 	void (*enable_fd)(struct hnae3_handle *handle, bool enable);
 	pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+	bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+	bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+	unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
 };
 
 struct hnae3_dcb_ops {
@@ -488,6 +494,14 @@ struct hnae3_roce_private_info {
 	void __iomem *roce_io_base;
 	int base_vector;
 	int num_vectors;
+
+	/* The below attributes defined for RoCE client, hnae3 gives
+	 * initial values to them, and RoCE client can modify and use
+	 * them.
+	 */
+	unsigned long reset_state;
+	unsigned long instance_state;
+	unsigned long state;
 };
 
 struct hnae3_unic_private_info {
@@ -520,9 +534,6 @@ struct hnae3_handle {
 	struct hnae3_ae_algo *ae_algo;  /* the class who provides this handle */
 	u64 flags; /* Indicate the capabilities for this handle*/
 
-	unsigned long last_reset_time;
-	enum hnae3_reset_type reset_level;
-
 	union {
 		struct net_device *netdev; /* first member */
 		struct hnae3_knic_private_info kinfo;
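The three query ops added to hnae3_ae_ops above (get_hw_reset_stat, ae_dev_resetting, ae_dev_reset_cnt) exist so a client such as the RoCE driver can ask the PF/VF backend about reset state instead of tracking it itself. A hedged sketch of how a client might gate hardware access on them; the helper name is invented, while the ops and handle layout come from the header above:

static bool roce_client_can_post_cmd(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* don't touch hardware while a reset is pending or in progress */
	if (ops->get_hw_reset_stat && ops->get_hw_reset_stat(handle))
		return false;
	if (ops->ae_dev_resetting && ops->ae_dev_resetting(handle))
		return false;

	return true;
}

A client could also snapshot ops->ae_dev_reset_cnt(handle) before a long operation and compare it afterwards to detect that a reset slipped in between.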
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_getets)
 		return h->kinfo.dcb_ops->ieee_getets(h, ets);
 
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_setets)
 		return h->kinfo.dcb_ops->ieee_setets(h, ets);
 
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_getpfc)
 		return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
 
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_setpfc)
 		return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
 
@@ -312,6 +312,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 	return min_t(u16, rss_size, max_rss_size);
 }
 
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+	u32 rcb_reg;
+
+	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+	rcb_reg |= BIT(HNS3_RING_EN_B);
+	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+	u32 rcb_reg;
+
+	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+	rcb_reg &= ~BIT(HNS3_RING_EN_B);
+	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
 static int hns3_nic_net_up(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +352,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	for (i = 0; i < priv->vector_num; i++)
 		hns3_vector_enable(&priv->tqp_vector[i]);
 
+	/* enable rcb */
+	for (j = 0; j < h->kinfo.num_tqps; j++)
+		hns3_tqp_enable(h->kinfo.tqp[j]);
+
 	/* start the ae_dev */
 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 	if (ret)
@@ -344,6 +366,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	return 0;
 
 out_start_err:
+	while (j--)
+		hns3_tqp_disable(h->kinfo.tqp[j]);
+
 	for (j = i - 1; j >= 0; j--)
 		hns3_vector_disable(&priv->tqp_vector[j]);
 
@@ -354,11 +379,13 @@ out_start_err:
 
 static int hns3_nic_net_open(struct net_device *netdev)
 {
-	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 	struct hnae3_knic_private_info *kinfo;
 	int i, ret;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	netif_carrier_off(netdev);
 
 	ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,13 +405,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
 				   kinfo->prio_tc[i]);
 	}
 
-	priv->ae_handle->last_reset_time = jiffies;
 	return 0;
 }
 
 static void hns3_nic_net_down(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
 	const struct hnae3_ae_ops *ops;
 	int i;
 
@@ -395,6 +422,10 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	for (i = 0; i < priv->vector_num; i++)
 		hns3_vector_disable(&priv->tqp_vector[i]);
 
+	/* disable rcb */
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_disable(h->kinfo.tqp[i]);
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
@@ -1615,10 +1646,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
 
 	priv->tx_timeout_count++;
 
-	if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
-		return;
-
-	/* request the reset */
+	/* request the reset, and let the hclge to determine
+	 * which reset level should be done
+	 */
 	if (h->ae_algo->ops->reset_event)
 		h->ae_algo->ops->reset_event(h->pdev, h);
 }
@@ -3337,7 +3367,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	priv->dev = &pdev->dev;
 	priv->netdev = netdev;
 	priv->ae_handle = handle;
-	priv->ae_handle->last_reset_time = jiffies;
 	priv->tx_timeout_count = 0;
 
 	handle->kinfo.netdev = netdev;
@@ -3357,11 +3386,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	if (handle->flags & HNAE3_SUPPORT_VF)
-		handle->reset_level = HNAE3_VF_RESET;
-	else
-		handle->reset_level = HNAE3_FUNC_RESET;
-
 	ret = hns3_get_ring_config(priv);
 	if (ret) {
 		ret = -ENOMEM;
@@ -3397,6 +3421,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
 	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 
+	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
 	return ret;
 
 out_reg_netdev_fail:
@@ -3423,6 +3449,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	if (netdev->reg_state != NETREG_UNINITIALIZED)
 		unregister_netdev(netdev);
 
+	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+		netdev_warn(netdev, "already uninitialized\n");
+		goto out_netdev_free;
+	}
+
 	hns3_del_all_fd_rules(netdev, true);
 
 	hns3_force_clear_all_rx_ring(handle);
@@ -3443,6 +3474,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
 	priv->ring_data = NULL;
 
+out_netdev_free:
 	free_netdev(netdev);
 }
 
@@ -3708,8 +3740,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
 
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct net_device *ndev = kinfo->netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return 0;
+
+	/* it is cumbersome for hardware to pick-and-choose entries for deletion
+	 * from table space. Hence, for function reset software intervention is
+	 * required to delete the entries
+	 */
+	if (hns3_dev_ongoing_func_reset(ae_dev)) {
+		hns3_remove_hw_addr(ndev);
+		hns3_del_all_fd_rules(ndev, false);
+	}
 
 	if (!netif_running(ndev))
 		return 0;
@@ -3720,6 +3766,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
 	int ret = 0;
 
 	if (netif_running(kinfo->netdev)) {
@@ -3729,9 +3776,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 				   "hns net up fail, ret=%d!\n", ret);
 			return ret;
 		}
-		handle->last_reset_time = jiffies;
 	}
 
+	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
 	return ret;
 }
 
@@ -3782,16 +3830,22 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 		priv->ring_data = NULL;
 	}
 
+	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
 	return ret;
 }
 
 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	struct net_device *netdev = handle->kinfo.netdev;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+		netdev_warn(netdev, "already uninitialized\n");
+		return 0;
+	}
+
 	hns3_force_clear_all_rx_ring(handle);
 
 	ret = hns3_nic_uninit_vector_data(priv);
@@ -3806,14 +3860,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 	if (ret)
 		netdev_err(netdev, "uninit ring error\n");
 
-	/* it is cumbersome for hardware to pick-and-choose entries for deletion
-	 * from table space. Hence, for function reset software intervention is
-	 * required to delete the entries
-	 */
-	if (hns3_dev_ongoing_func_reset(ae_dev)) {
-		hns3_remove_hw_addr(netdev);
-		hns3_del_all_fd_rules(netdev, false);
-	}
+	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
 	return ret;
 }
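One detail worth calling out in the hns3_nic_net_up() hunk: the vector count lives in i and the ring count in j, so the error path can disable exactly what was switched on, in reverse order — "while (j--)" undoes all enabled rings, then the vectors are walked backwards. A standalone mock of that unwind idiom (function names invented; compiles as plain C):

#include <stdio.h>

static void enable_vector(int i)  { printf("vec %d on\n", i); }
static void disable_vector(int i) { printf("vec %d off\n", i); }
static void enable_ring(int i)    { printf("ring %d on\n", i); }
static void disable_ring(int i)   { printf("ring %d off\n", i); }
static int  start_ae_dev(void)    { return -1; /* force the error path */ }

static int bring_up(int vectors, int rings)
{
	int i, j, ret;

	for (i = 0; i < vectors; i++)
		enable_vector(i);

	for (j = 0; j < rings; j++)
		enable_ring(j);

	ret = start_ae_dev();
	if (ret == 0)
		return 0;

	/* unwind: all j rings were enabled, so "while (j--)" hits each one */
	while (j--)
		disable_ring(j);

	/* then the vectors, newest first */
	for (j = i - 1; j >= 0; j--)
		disable_vector(j);

	return ret;
}

int main(void)
{
	return bring_up(2, 3) ? 1 : 0;
}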
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
 enum hns3_nic_state {
 	HNS3_NIC_STATE_TESTING,
 	HNS3_NIC_STATE_RESETTING,
-	HNS3_NIC_STATE_REINITING,
+	HNS3_NIC_STATE_INITED,
 	HNS3_NIC_STATE_DOWN,
 	HNS3_NIC_STATE_DISABLED,
 	HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
 #define HNS3_RING_PREFETCH_EN_REG	0x0007C
 #define HNS3_RING_CFG_VF_NUM_REG	0x00080
 #define HNS3_RING_ASID_REG		0x0008C
-#define HNS3_RING_RX_VM_REG		0x00090
+#define HNS3_RING_EN_REG		0x00090
 #define HNS3_RING_T0_BE_RST		0x00094
 #define HNS3_RING_COULD_BE_RST		0x00098
 #define HNS3_RING_WRR_WEIGHT_REG	0x0009c
@@ -194,6 +194,8 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_RL_OFFSET		0x900
 #define HNS3_VECTOR_RL_EN_B		6
 
+#define HNS3_RING_EN_B			0
+
 enum hns3_pkt_l3t_type {
 	HNS3_L3T_NONE,
 	HNS3_L3T_IPV6,
@@ -577,6 +579,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
 	return ring->next_to_use == ring->next_to_clean;
 }
 
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
 static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = READ_ONCE(base);
@@ -589,6 +596,16 @@ static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
 	return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
 }
 
+#define hns3_read_dev(a, reg) \
+	hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+}
+
 #define hns3_write_dev(a, reg, value) \
 	hns3_write_reg((a)->io_base, (reg), (value))
 
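HNS3_RING_EN_B is a single bit in the per-ring HNS3_RING_EN_REG at offset 0x90; hns3_tqp_enable()/hns3_tqp_disable() in the hns3_enet.c hunk earlier flip just that bit with a read-modify-write through the new hns3_read_dev()/hns3_write_dev() macros. A userspace mock of the bit manipulation, showing that the rest of the register is preserved (the MMIO word is simulated as a plain variable):

#include <stdint.h>
#include <assert.h>

#define RING_EN_B 0	/* bit position, mirroring HNS3_RING_EN_B above */

static uint32_t mmio;	/* stand-in for the ring's HNS3_RING_EN_REG */

static void tqp_enable(void)  { mmio |=  (1u << RING_EN_B); }
static void tqp_disable(void) { mmio &= ~(1u << RING_EN_B); }

int main(void)
{
	mmio = 0xabcd0000;		/* other fields in the register */
	tqp_enable();
	assert(mmio == 0xabcd0001);	/* only the enable bit changed */
	tqp_disable();
	assert(mmio == 0xabcd0000);
	return 0;
}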
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
 	int test_index = 0;
 	u32 i;
 
+	if (hns3_nic_resetting(ndev)) {
+		netdev_err(ndev, "dev resetting!");
+		return;
+	}
+
 	/* Only do offline selftest, or pass by default */
 	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
 		return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int queue_num = h->kinfo.num_tqps;
 
+	if (hns3_nic_resetting(netdev)) {
+		netdev_err(netdev, "dev resetting!");
+		return;
+	}
+
 	param->tx_max_pending = HNS3_RING_MAX_PENDING;
 	param->rx_max_pending = HNS3_RING_MAX_PENDING;
 
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
 	u32 old_desc_num, new_desc_num;
 	int ret;
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (param->rx_mini_pending || param->rx_jumbo_pending)
 		return -EINVAL;
 
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 	struct hnae3_handle *h = priv->ae_handle;
 	u16 queue_num = h->kinfo.num_tqps;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	if (queue >= queue_num) {
 		netdev_err(netdev,
 			   "Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
 	int ret;
 	int i;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	ret = hns3_check_coalesce_para(netdev, cmd);
 	if (ret)
 		return ret;
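Across the dcbnl and ethtool hunks the same guard is inserted at each configuration entry point: bail out while HNS3_NIC_STATE_RESETTING is set so no user-triggered path races with the reset task. Ops that cannot report an error just log and return; state-changing ops return -EBUSY so user space can retry. A sketch of the shape (the op name is hypothetical; hns3_nic_resetting() is the real helper from the header hunk above):

static int hns3_example_set_op(struct net_device *ndev)
{
	/* hns3_nic_resetting() just tests the RESETTING bit in priv->state */
	if (hns3_nic_resetting(ndev))
		return -EBUSY;	/* safe to retry once the reset finishes */

	/* ... normal configuration work ... */
	return 0;
}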
@@ -2145,6 +2145,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
 	/* check for vector0 reset event sources */
 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,6 +2153,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	}
 
 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
@@ -2159,6 +2161,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	}
 
 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
 		return HCLGE_VECTOR0_EVENT_RST;
@@ -2308,13 +2311,44 @@ static int hclge_notify_client(struct hclge_dev *hdev,
 		int ret;
 
 		ret = client->ops->reset_notify(handle, type);
-		if (ret)
+		if (ret) {
 			dev_err(&hdev->pdev->dev,
 				"notify nic client failed %d(%d)\n", type, ret);
+			return ret;
+		}
 	}
 
 	return 0;
 }
 
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+				    enum hnae3_reset_notify_type type)
+{
+	struct hnae3_client *client = hdev->roce_client;
+	int ret = 0;
+	u16 i;
+
+	if (!client)
+		return 0;
+
+	if (!client->ops->reset_notify)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+		struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+		ret = client->ops->reset_notify(handle, type);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"notify roce client failed %d(%d)",
+				type, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static int hclge_reset_wait(struct hclge_dev *hdev)
 {
 #define HCLGE_RESET_WATI_MS	100
@@ -2396,7 +2430,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 		break;
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
-		hclge_func_reset_cmd(hdev, 0);
 		/* schedule again to check later */
 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
 		hclge_reset_task_schedule(hdev);
@@ -2414,20 +2447,25 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
 
 	/* return the highest priority reset level amongst all */
-	if (test_bit(HNAE3_GLOBAL_RESET, addr))
-		rst_level = HNAE3_GLOBAL_RESET;
-	else if (test_bit(HNAE3_CORE_RESET, addr))
-		rst_level = HNAE3_CORE_RESET;
-	else if (test_bit(HNAE3_IMP_RESET, addr))
+	if (test_bit(HNAE3_IMP_RESET, addr)) {
 		rst_level = HNAE3_IMP_RESET;
-	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		clear_bit(HNAE3_IMP_RESET, addr);
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
+		rst_level = HNAE3_GLOBAL_RESET;
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
+		rst_level = HNAE3_CORE_RESET;
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
 		rst_level = HNAE3_FUNC_RESET;
-
-	/* now, clear all other resets */
-	clear_bit(HNAE3_GLOBAL_RESET, addr);
-	clear_bit(HNAE3_CORE_RESET, addr);
-	clear_bit(HNAE3_IMP_RESET, addr);
-	clear_bit(HNAE3_FUNC_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	}
 
 	return rst_level;
 }
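The rework of hclge_get_reset_level() is subtle but important: the old code always cleared every pending bit, so a higher-level reset that arrived while a lower one was being served could be lost. The new code clears only the chosen level and the levels it subsumes (IMP > global > core > func), leaving any higher pending request intact for the next pass. A standalone mock of the selection logic — enum values and bit layout are illustrative, not the kernel's:

#include <stdio.h>

enum rst { RST_NONE, RST_FUNC, RST_CORE, RST_GLOBAL, RST_IMP };

static enum rst get_reset_level(unsigned long *addr)
{
	enum rst level = RST_NONE;

	if (*addr & (1UL << RST_IMP)) {
		level = RST_IMP;
		*addr &= ~((1UL << RST_IMP) | (1UL << RST_GLOBAL) |
			   (1UL << RST_CORE) | (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_GLOBAL)) {
		level = RST_GLOBAL;
		*addr &= ~((1UL << RST_GLOBAL) | (1UL << RST_CORE) |
			   (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_CORE)) {
		level = RST_CORE;
		*addr &= ~((1UL << RST_CORE) | (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_FUNC)) {
		level = RST_FUNC;
		*addr &= ~(1UL << RST_FUNC);
	}
	return level;
}

int main(void)
{
	/* both a core and a function reset are pending */
	unsigned long pending = (1UL << RST_CORE) | (1UL << RST_FUNC);

	/* serves the core reset; the function reset is absorbed by it */
	printf("level=%d pending=%#lx\n", get_reset_level(&pending), pending);
	return 0;
}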
@@ -2457,39 +2495,146 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 }
 
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		ret = hclge_func_reset_cmd(hdev, 0);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"assertting function reset fail %d!\n", ret);
+			return ret;
+		}
+
+		/* After performaning pf reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hclge_cmd_init is called.
+		 */
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		break;
+	default:
+		break;
+	}
+
+	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+	return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+	if (hdev->reset_pending) {
+		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+			 hdev->reset_pending);
+		return true;
+	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+		   BIT(HCLGE_IMP_RESET_BIT))) {
+		dev_info(&hdev->pdev->dev,
+			 "reset failed because IMP Reset is pending\n");
+		hclge_clear_reset_cause(hdev);
+		return false;
+	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+		hdev->reset_fail_cnt++;
+		if (is_timeout) {
+			set_bit(hdev->reset_type, &hdev->reset_pending);
+			dev_info(&hdev->pdev->dev,
+				 "re-schedule to wait for hw reset done\n");
+			return true;
+		}
+
+		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+		hclge_clear_reset_cause(hdev);
+		mod_timer(&hdev->reset_timer,
+			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+		return false;
+	}
+
+	hclge_clear_reset_cause(hdev);
+	dev_err(&hdev->pdev->dev, "Reset fail!\n");
+	return false;
+}
+
 static void hclge_reset(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-	struct hnae3_handle *handle;
+	bool is_timeout = false;
+	int ret;
 
 	/* Initialize ae_dev reset status as well, in case enet layer wants to
 	 * know if device is undergoing reset
 	 */
 	ae_dev->reset_type = hdev->reset_type;
+	hdev->reset_count++;
+	hdev->last_reset_time = jiffies;
 	/* perform reset of the stack & ae device for a client */
-	handle = &hdev->vport[0].nic;
+	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset;
+
 	rtnl_lock();
-	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset_lock;
 
 	rtnl_unlock();
 
-	if (!hclge_reset_wait(hdev)) {
-		rtnl_lock();
-		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-		hclge_reset_ae_dev(hdev->ae_dev);
-		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	ret = hclge_reset_prepare_wait(hdev);
+	if (ret)
+		goto err_reset;
 
-		hclge_clear_reset_cause(hdev);
-	} else {
-		rtnl_lock();
-		/* schedule again to check pending resets later */
-		set_bit(hdev->reset_type, &hdev->reset_pending);
-		hclge_reset_task_schedule(hdev);
+	if (hclge_reset_wait(hdev)) {
+		is_timeout = true;
+		goto err_reset;
 	}
 
-	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-	handle->last_reset_time = jiffies;
+	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	rtnl_lock();
+	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_reset_ae_dev(hdev->ae_dev);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	hclge_clear_reset_cause(hdev);
+
+	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
 	rtnl_unlock();
 	ae_dev->reset_type = HNAE3_NONE_RESET;
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	return;
+
+err_reset_lock:
+	rtnl_unlock();
+err_reset:
+	if (hclge_reset_err_handle(hdev, is_timeout))
+		hclge_reset_task_schedule(hdev);
 }
 
 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
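The rewritten hclge_reset() now checks every notification step and spans both NIC and RoCE clients. Stripped of the error handling, the ordering it enforces looks like this — a summary sketch, not driver code, though every function called appears in the hunk above; note the rtnl lock is held only around the stack (enet) notifications, and RoCE is torn down first and brought up last:

static void hclge_reset_flow_summary(struct hclge_dev *hdev)
{
	hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);

	rtnl_lock();
	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);	/* stop enet */
	rtnl_unlock();

	hclge_reset_prepare_wait(hdev);	/* e.g. assert PF reset via firmware */
	hclge_reset_wait(hdev);		/* poll until hardware reset is done */

	hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);

	rtnl_lock();
	hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	hclge_reset_ae_dev(hdev->ae_dev);	/* re-init the ae device */
	hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();

	hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
}

On any failure or timeout the real code jumps to hclge_reset_err_handle(), which either re-schedules the wait, upgrades the reset level via the new reset_timer, or gives up after MAX_RESET_FAIL_CNT attempts.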
@@ -2515,20 +2660,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
 	if (!handle)
 		handle = &hdev->vport[0].nic;
 
-	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
 		return;
-	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
-		handle->reset_level = HNAE3_FUNC_RESET;
+	else if (hdev->default_reset_request)
+		hdev->reset_level =
+			hclge_get_reset_level(hdev,
+					      &hdev->default_reset_request);
+	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+		hdev->reset_level = HNAE3_FUNC_RESET;
 
 	dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
-		 handle->reset_level);
+		 hdev->reset_level);
 
 	/* request reset & schedule reset task */
-	set_bit(handle->reset_level, &hdev->reset_request);
+	set_bit(hdev->reset_level, &hdev->reset_request);
 	hclge_reset_task_schedule(hdev);
 
-	if (handle->reset_level < HNAE3_GLOBAL_RESET)
-		handle->reset_level++;
+	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+		hdev->reset_level++;
 }
 
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+					enum hnae3_reset_type rst_type)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+	dev_info(&hdev->pdev->dev,
+		 "triggering global reset in reset timer\n");
+	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+	hclge_reset_event(hdev->pdev, NULL);
+}
+
 static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2709,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
 	 * b. else, we can come back later to check this status so re-sched
 	 *    now.
 	 */
+	hdev->last_reset_time = jiffies;
 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
 	if (hdev->reset_type != HNAE3_NONE_RESET)
 		hclge_reset(hdev);
@@ -4336,8 +4504,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 	struct hlist_node *node;
 	int ret;
 
+	/* Return ok here, because reset error handling will check this
+	 * return value. If error is returned here, the reset process will
+	 * fail.
+	 */
 	if (!hnae3_dev_fd_supported(hdev))
-		return -EOPNOTSUPP;
+		return 0;
 
 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +4764,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
 	return 0;
 }
 
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hdev->reset_count;
+}
+
 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4805,10 +5002,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	int i;
-
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, true);
 
 	/* mac enable */
 	hclge_cfg_mac_mode(hdev, true);
@@ -4828,7 +5021,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	int i;
 
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
 
@@ -4836,14 +5028,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	cancel_work_sync(&hdev->service_task);
 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 
-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+	/* If it is not PF reset, the firmware will disable the MAC,
+	 * so it only need to stop phy here.
+	 */
+	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+	    hdev->reset_type != HNAE3_FUNC_RESET) {
 		hclge_mac_stop_phy(hdev);
 		return;
 	}
 
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, false);
-
 	/* Mac disable */
 	hclge_cfg_mac_mode(hdev, false);
 
@@ -6612,6 +6805,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 
 	if (hdev->service_timer.function)
 		del_timer_sync(&hdev->service_timer);
+	if (hdev->reset_timer.function)
+		del_timer_sync(&hdev->reset_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
 	if (hdev->rst_service_task.func)
@@ -6635,6 +6830,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_level = HNAE3_FUNC_RESET;
 	ae_dev->priv = hdev;
 
 	ret = hclge_pci_init(hdev);
@@ -6769,6 +6965,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +6976,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 
 	hclge_state_init(hdev);
+	hdev->last_reset_time = jiffies;
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -7321,6 +7519,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
 	.reset_event = hclge_reset_event,
+	.set_default_reset_request = hclge_set_def_reset_request,
 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
 	.set_channels = hclge_set_channels,
 	.get_channels = hclge_get_channels,
@@ -7337,6 +7536,9 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.restore_fd_rules = hclge_restore_fd_entries,
 	.enable_fd = hclge_enable_fd,
 	.process_hw_error = hclge_process_ras_hw_error,
+	.get_hw_reset_stat = hclge_get_hw_reset_stat,
+	.ae_dev_resetting = hclge_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
 };
 
 static struct hnae3_ae_algo ae_algo = {
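hclge_set_def_reset_request() plus hclge_reset_event() form the new entry point for RAS-style error handlers: record the wanted level in default_reset_request, then kick the normal reset path, which is exactly what hclge_reset_timer() above does with HNAE3_GLOBAL_RESET when a reset-level upgrade is due. A hedged sketch of a caller (the handler name is invented; both callees are from the hunks above):

static void example_ras_handler(struct hnae3_ae_dev *ae_dev,
				struct hclge_dev *hdev)
{
	/* remember the level this error demands ... */
	hclge_set_def_reset_request(ae_dev, HNAE3_GLOBAL_RESET);

	/* ... then trigger the common path; NULL means use vport[0]'s handle */
	hclge_reset_event(hdev->pdev, NULL);
}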
@@ -102,6 +102,7 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_GLOBAL_RESET_REG		0x20A00
 #define HCLGE_GLOBAL_RESET_BIT		0
 #define HCLGE_CORE_RESET_BIT		1
+#define HCLGE_IMP_RESET_BIT		2
 #define HCLGE_FUN_RST_ING		0x20C00
 #define HCLGE_FUN_RST_ING_B		0
 
@@ -593,10 +594,15 @@ struct hclge_dev {
 	struct hclge_misc_vector misc_vector;
 	struct hclge_hw_stats hw_stats;
 	unsigned long state;
+	unsigned long last_reset_time;
 
 	enum hnae3_reset_type reset_type;
+	enum hnae3_reset_type reset_level;
+	unsigned long default_reset_request;
 	unsigned long reset_request;	/* reset has been requested */
 	unsigned long reset_pending;	/* client rst is pending to be served */
+	unsigned long reset_count;	/* the number of reset has been done */
+	u32 reset_fail_cnt;
 	u32 fw_version;
 	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
 	u16 num_tqps;			/* Num task queue pairs of this PF */
@@ -644,6 +650,7 @@ struct hclge_dev {
 	unsigned long service_timer_period;
 	unsigned long service_timer_previous;
 	struct timer_list service_timer;
+	struct timer_list reset_timer;
 	struct work_struct service_task;
 	struct work_struct rst_service_task;
 	struct work_struct mbx_service_task;
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
 	return false;
 }
 
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_dev *hdev = ring->dev;
+	struct hclgevf_hw *hw = &hdev->hw;
+	u32 reg_val;
+
+	if (ring->flag == HCLGEVF_TYPE_CSQ) {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	} else {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+	}
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_cmd_config_regs(&hw->cmq.csq);
+	hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
 static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
 	}
 }
 
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
-				  struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
 {
 	struct hclgevf_hw *hw = &hdev->hw;
-	int ring_type = ring->flag;
-	u32 reg_val;
+	struct hclgevf_cmq_ring *ring =
+		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
 	int ret;
 
-	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-	spin_lock_init(&ring->lock);
-	ring->next_to_clean = 0;
-	ring->next_to_use = 0;
 	ring->dev = hdev;
 	ring->flag = ring_type;
 
 	/* allocate CSQ/CRQ descriptor */
 	ret = hclgevf_alloc_cmd_desc(ring);
-	if (ret) {
+	if (ret)
 		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
 			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-		return ret;
-	}
-
-	/* initialize the hardware registers with csq/crq dma-address,
-	 * descriptor number, head & tail pointers
-	 */
-	switch (ring_type) {
-	case HCLGEVF_TYPE_CSQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
-		return 0;
-	case HCLGEVF_TYPE_CRQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-		return 0;
-	default:
-		return -EINVAL;
-	}
+
+	return ret;
 }
 
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -282,55 +283,73 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
 	return status;
 }
 
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
+{
+	int ret;
+
+	/* Setup the lock for command queue */
+	spin_lock_init(&hdev->hw.cmq.csq.lock);
+	spin_lock_init(&hdev->hw.cmq.crq.lock);
+
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"CSQ ring setup error %d\n", ret);
+		return ret;
+	}
+
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"CRQ ring setup error %d\n", ret);
+		goto err_csq;
+	}
+
+	return 0;
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	return ret;
+}
+
 int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 {
 	u32 version;
 	int ret;
 
-	/* setup Tx write back timeout */
-	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
-
-	/* setup queue CSQ/CRQ rings */
-	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CSQ ring\n", ret);
-		return ret;
-	}
-
-	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CRQ ring\n", ret);
-		goto err_csq;
-	}
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
 
 	/* initialize the pointers of async rx queue of mailbox */
 	hdev->arq.hdev = hdev;
 	hdev->arq.head = 0;
 	hdev->arq.tail = 0;
 	hdev->arq.count = 0;
+	hdev->hw.cmq.csq.next_to_clean = 0;
+	hdev->hw.cmq.csq.next_to_use = 0;
+	hdev->hw.cmq.crq.next_to_clean = 0;
+	hdev->hw.cmq.crq.next_to_use = 0;
+
+	hclgevf_cmd_init_regs(&hdev->hw);
+
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
 	/* get firmware version */
 	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed(%d) to query firmware version\n", ret);
-		goto err_crq;
+		return ret;
 	}
 	hdev->fw_version = version;
 
 	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
 
 	return 0;
-err_crq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
-	return ret;
 }
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
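A small aside on the address programming in hclgevf_cmd_config_regs() above: the upper half of the descriptor DMA address is written as (addr >> 31) >> 1 rather than addr >> 32. If dma_addr_t is ever a 32-bit type on some configuration, a single shift by the full type width would be undefined behaviour in C; the split shift stays defined and simply yields 0. A standalone demonstration of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 64-bit value standing in for ring->desc_dma_addr */
	uint64_t dma = 0x1234567890ULL;
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);	/* == dma >> 32 here */

	printf("low=%#x high=%#x\n", lo, hi);	/* low=0x34567890 high=0x12 */
	return 0;
}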
@@ -256,6 +256,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
 
 int hclgevf_cmd_init(struct hclgevf_dev *hdev);
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
 
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -956,13 +956,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
 	return status;
 }
 
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
-	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
-	return tqp->index;
-}
-
 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1165,6 +1158,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 {
 	int ret;
 
+	hdev->reset_count++;
 	rtnl_lock();
 
 	/* bring down the nic to stop any ongoing TX/RX */
@@ -1219,6 +1213,19 @@ static int hclgevf_do_reset(struct hclgevf_dev *hdev)
 	return status;
 }
 
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+						     unsigned long *addr)
+{
+	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+	if (test_bit(HNAE3_VF_RESET, addr)) {
+		rst_level = HNAE3_VF_RESET;
+		clear_bit(HNAE3_VF_RESET, addr);
+	}
+
+	return rst_level;
+}
+
 static void hclgevf_reset_event(struct pci_dev *pdev,
 				struct hnae3_handle *handle)
 {
@@ -1226,13 +1233,26 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 
 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
 
-	handle->reset_level = HNAE3_VF_RESET;
+	if (!hdev->default_reset_request)
+		hdev->reset_level =
+			hclgevf_get_reset_level(hdev,
+						&hdev->default_reset_request);
+	else
+		hdev->reset_level = HNAE3_VF_RESET;
 
 	/* reset of this VF requested */
 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
 	hclgevf_reset_task_schedule(hdev);
 
-	handle->last_reset_time = jiffies;
+	hdev->last_reset_time = jiffies;
 }
 
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+					  enum hnae3_reset_type rst_type)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	set_bit(rst_type, &hdev->default_reset_request);
+}
+
 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1352,7 +1372,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 		 */
 		if (hdev->reset_attempts > 3) {
 			/* prepare for full reset of stack + pcie interface */
-			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+			hdev->reset_level = HNAE3_VF_FULL_RESET;
 
 			/* "defer" schedule the reset task again */
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1566,21 +1586,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 
 static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	int i, queue_id;
-
-	for (i = 0; i < kinfo->num_tqps; i++) {
-		/* ring enable */
-		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
-		if (queue_id < 0) {
-			dev_warn(&hdev->pdev->dev,
-				 "Get invalid queue id, ignore it\n");
-			continue;
-		}
-
-		hclgevf_tqp_enable(hdev, queue_id, 0, true);
-	}
 
 	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
@@ -1595,24 +1601,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 
 static void hclgevf_ae_stop(struct hnae3_handle *handle)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	int i, queue_id;
 
 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
 
-	for (i = 0; i < kinfo->num_tqps; i++) {
-		/* Ring disable */
-		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
-		if (queue_id < 0) {
-			dev_warn(&hdev->pdev->dev,
-				 "Get invalid queue id, ignore it\n");
-			continue;
-		}
-
-		hclgevf_tqp_enable(hdev, queue_id, 0, false);
-	}
-
 	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
 	del_timer_sync(&hdev->service_timer);
@@ -1974,6 +1966,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
+	ret = hclgevf_cmd_queue_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+		goto err_cmd_queue_init;
+	}
+
 	ret = hclgevf_cmd_init(hdev);
 	if (ret)
 		goto err_cmd_init;
@@ -1983,16 +1981,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Query vf status error, ret = %d.\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	ret = hclgevf_init_msi(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	hclgevf_state_init(hdev);
+	hdev->reset_level = HNAE3_VF_RESET;
 
 	ret = hclgevf_misc_irq_init(hdev);
 	if (ret) {
@@ -2034,6 +2033,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
+	hdev->last_reset_time = jiffies;
 	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
 
 	return 0;
@@ -2043,9 +2043,9 @@ err_config:
 err_misc_irq_init:
 	hclgevf_state_uninit(hdev);
 	hclgevf_uninit_msi(hdev);
-err_query_vf:
-	hclgevf_cmd_uninit(hdev);
 err_cmd_init:
+	hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
 	hclgevf_pci_uninit(hdev);
 	return ret;
 }
@@ -2159,6 +2159,27 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
 	*media_type = hdev->hw.mac.media_type;
 }
 
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->reset_count;
+}
+
 static const struct hnae3_ae_ops hclgevf_ops = {
 	.init_ae_dev = hclgevf_init_ae_dev,
 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -2193,11 +2214,15 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.set_vlan_filter = hclgevf_set_vlan_filter,
 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
 	.reset_event = hclgevf_reset_event,
+	.set_default_reset_request = hclgevf_set_def_reset_request,
 	.get_channels = hclgevf_get_channels,
 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
 	.get_status = hclgevf_get_status,
 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
 	.get_media_type = hclgevf_get_media_type,
+	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+	.ae_dev_resetting = hclgevf_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
 };
 
 static struct hnae3_ae_algo ae_algovf = {
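With reset_level now living in hclgevf_dev, the VF keeps its own escalation policy: hclgevf_reset_service_task() (touched above) bumps to HNAE3_VF_FULL_RESET — a full reset of the stack plus the PCIe function — once more than three VF reset attempts have failed. A trivial standalone mock of that decision (enum values illustrative, not the kernel's):

#include <assert.h>

enum vf_reset { VF_RESET, VF_FULL_RESET };

static enum vf_reset pick_vf_reset_level(unsigned int reset_attempts)
{
	/* mirrors the "hdev->reset_attempts > 3" check in the service task */
	return reset_attempts > 3 ? VF_FULL_RESET : VF_RESET;
}

int main(void)
{
	assert(pick_vf_reset_level(2) == VF_RESET);
	assert(pick_vf_reset_level(4) == VF_FULL_RESET);
	return 0;
}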
|
@ -145,10 +145,14 @@ struct hclgevf_dev {
|
|||
struct hclgevf_misc_vector misc_vector;
|
||||
struct hclgevf_rss_cfg rss_cfg;
|
||||
unsigned long state;
|
||||
unsigned long default_reset_request;
|
||||
unsigned long last_reset_time;
|
||||
enum hnae3_reset_type reset_level;
|
||||
|
||||
#define HCLGEVF_RESET_REQUESTED 0
|
||||
#define HCLGEVF_RESET_PENDING 1
|
||||
unsigned long reset_state; /* requested, pending */
|
||||
unsigned long reset_count; /* the number of reset has been done */
|
||||
u32 reset_attempts;
|
||||
|
||||
u32 fw_version;
|
||||
|
@ -196,14 +200,14 @@ static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
|
|||
{
|
||||
return (hdev &&
|
||||
(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
|
||||
(hdev->nic.reset_level == HNAE3_VF_RESET));
|
||||
(hdev->reset_level == HNAE3_VF_RESET));
|
||||
}
|
||||
|
||||
static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
|
||||
{
|
||||
return (hdev &&
|
||||
(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
|
||||
(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
|
||||
(hdev->reset_level == HNAE3_VF_FULL_RESET));
|
||||
}
|
||||
|
||||
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
|
||||
|
|
|
@@ -267,7 +267,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			 * has been completely reset. After this stack should
 			 * eventually be re-initialized.
 			 */
-			hdev->nic.reset_level = HNAE3_VF_RESET;
+			hdev->reset_level = HNAE3_VF_RESET;
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 			hclgevf_reset_task_schedule(hdev);
 