Merge branch 'hns3-next'
Peng Li says:

====================
fix some bugs for hns3 driver

This patchset fixes some bugs in the hns3 driver.

[Patch 1/6 - Patch 3/6] fix bugs related to the VF driver.
[Patch 4/6 - Patch 6/6] fix bugs in ethtool_ops.set_channels.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 08a24239cd

@@ -249,6 +249,16 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
         return 0;
 }
 
+static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
+{
+        u16 free_tqps, max_rss_size, max_tqps;
+
+        h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+        max_tqps = h->kinfo.num_tc * max_rss_size;
+
+        return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
 static int hns3_nic_net_up(struct net_device *netdev)
 {
         struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3013,7 +3023,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
         int ret;
 
         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
-                                   handle->kinfo.num_tqps);
+                                   hns3_get_max_available_channels(handle));
         if (!netdev)
                 return -ENOMEM;
 
@@ -3336,17 +3346,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
         return ret;
 }
 
-static u16 hns3_get_max_available_channels(struct net_device *netdev)
-{
-        struct hnae3_handle *h = hns3_get_handle(netdev);
-        u16 free_tqps, max_rss_size, max_tqps;
-
-        h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
-        max_tqps = h->kinfo.num_tc * max_rss_size;
-
-        return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
-}
-
 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
 {
         struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3397,12 +3396,12 @@ int hns3_set_channels(struct net_device *netdev,
         if (ch->rx_count || ch->tx_count)
                 return -EINVAL;
 
-        if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+        if (new_tqp_num > hns3_get_max_available_channels(h) ||
             new_tqp_num < kinfo->num_tc) {
                 dev_err(&netdev->dev,
                         "Change tqps fail, the tqp range is from %d to %d",
                         kinfo->num_tc,
-                        hns3_get_max_available_channels(netdev));
+                        hns3_get_max_available_channels(h));
                 return -EINVAL;
         }
 
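
Taken together, the hunks above compute a per-function ceiling on "combined" channels, min(num_tc * max_rss_size, free_tqps + num_tqps), and use it both when sizing the netdev at probe time and when validating an ethtool -L request. The standalone C sketch below models only that arithmetic and range check; max_available_channels(), validate_channel_request() and the sample numbers are illustrative stand-ins, not driver code.

/* Standalone model of the channel-count clamp used above: the largest
 * usable "combined" channel count is num_tc * max_rss_size, further
 * capped by the TQPs the function can actually own (its current TQPs
 * plus whatever is still unassigned).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t max_available_channels(uint16_t num_tc, uint16_t max_rss_size,
                                        uint16_t free_tqps, uint16_t cur_tqps)
{
        uint32_t max_tqps = (uint32_t)num_tc * max_rss_size;
        uint32_t hw_limit = (uint32_t)free_tqps + cur_tqps;

        return (uint16_t)(max_tqps < hw_limit ? max_tqps : hw_limit);
}

/* Mirrors the ethtool set_channels range check: the requested combined
 * count must lie in [num_tc, max_available_channels()].
 */
static int validate_channel_request(uint16_t req, uint16_t num_tc,
                                    uint16_t max_channels)
{
        return (req >= num_tc && req <= max_channels) ? 0 : -1;
}

int main(void)
{
        /* e.g. 4 TCs, RSS limited to 16 queues per TC, 32 spare TQPs, 16 in use */
        uint16_t max = max_available_channels(4, 16, 32, 16);

        printf("max combined channels: %u\n", (unsigned)max);                    /* 48 */
        printf("request 8  -> %d\n", validate_channel_request(8, 4, max));       /*  0 */
        printf("request 64 -> %d\n", validate_channel_request(64, 4, max));      /* -1 */
        return 0;
}

Allocating the netdev with this maximum rather than with the currently assigned num_tqps matters because the queue count passed to alloc_etherdev_mq() is a hard upper bound on what netif_set_real_num_tx_queues() will later accept, so an undersized allocation would make any later channel increase fail.
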
@@ -3717,20 +3717,11 @@ static int hclge_ae_start(struct hnae3_handle *handle)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
         struct hclge_dev *hdev = vport->back;
-        int i, queue_id, ret;
+        int i, ret;
 
-        for (i = 0; i < vport->alloc_tqps; i++) {
-                /* todo clear interrupt */
-                /* ring enable */
-                queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
-                if (queue_id < 0) {
-                        dev_warn(&hdev->pdev->dev,
-                                 "Get invalid queue id, ignore it\n");
-                        continue;
-                }
+        for (i = 0; i < vport->alloc_tqps; i++)
+                hclge_tqp_enable(hdev, i, 0, true);
 
-                hclge_tqp_enable(hdev, queue_id, 0, true);
-        }
         /* mac enable */
         hclge_cfg_mac_mode(hdev, true);
         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -3750,19 +3741,11 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
         struct hclge_dev *hdev = vport->back;
-        int i, queue_id;
+        int i;
 
-        for (i = 0; i < vport->alloc_tqps; i++) {
-                /* Ring disable */
-                queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
-                if (queue_id < 0) {
-                        dev_warn(&hdev->pdev->dev,
-                                 "Get invalid queue id, ignore it\n");
-                        continue;
-                }
+        for (i = 0; i < vport->alloc_tqps; i++)
+                hclge_tqp_enable(hdev, i, 0, false);
 
-                hclge_tqp_enable(hdev, queue_id, 0, false);
-        }
         /* Mac disable */
         hclge_cfg_mac_mode(hdev, false);
 
@@ -4848,21 +4831,36 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
         return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
+static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
+                                          u16 queue_id)
+{
+        struct hnae3_queue *queue;
+        struct hclge_tqp *tqp;
+
+        queue = handle->kinfo.tqp[queue_id];
+        tqp = container_of(queue, struct hclge_tqp, q);
+
+        return tqp->index;
+}
+
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
         struct hclge_dev *hdev = vport->back;
         int reset_try_times = 0;
         int reset_status;
+        u16 queue_gid;
         int ret;
 
+        queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
+
         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
         if (ret) {
                 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
                 return;
         }
 
-        ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
+        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
         if (ret) {
                 dev_warn(&hdev->pdev->dev,
                          "Send reset tqp cmd fail, ret = %d\n", ret);
@@ -4873,7 +4871,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
                 /* Wait for tqp hw reset */
                 msleep(20);
-                reset_status = hclge_get_reset_status(hdev, queue_id);
+                reset_status = hclge_get_reset_status(hdev, queue_gid);
                 if (reset_status)
                         break;
         }
@@ -4883,7 +4881,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
                 return;
         }
 
-        ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
+        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
         if (ret) {
                 dev_warn(&hdev->pdev->dev,
                          "Deassert the soft reset fail, ret = %d\n", ret);
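
The hclge changes above turn on the distinction between a handle-local queue index (0 .. alloc_tqps-1), which hclge_tqp_enable() can take directly, and the global TQP index that the reset command must carry, recovered by stepping from the generic hnae3_queue pointer back to its containing hclge_tqp. Below is a minimal standalone model of that container_of() mapping; the mock_* types and the sample indices are invented for illustration and are not the driver's definitions.

/* Minimal model of mapping a handle-local queue back to its global index
 * via container_of(): the handle only stores pointers to the embedded
 * generic queue, so recovering driver-private data (here the global
 * "index") means stepping back to the enclosing structure.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_queue {            /* stands in for struct hnae3_queue */
        void *handle;
};

struct mock_tqp {              /* stands in for struct hclge_tqp */
        uint16_t index;        /* global TQP index */
        struct mock_queue q;   /* embedded generic queue */
};

/* Same shape as the new hclge_covert_handle_qid_global(): take the
 * handle-local slot, follow the stored queue pointer, step back to the
 * containing TQP and return its global index.
 */
static uint16_t local_to_global(struct mock_queue **kinfo_tqp, uint16_t local_qid)
{
        struct mock_queue *queue = kinfo_tqp[local_qid];
        struct mock_tqp *tqp = container_of(queue, struct mock_tqp, q);

        return tqp->index;
}

int main(void)
{
        /* pretend this function owns TQPs with global indices 8 and 9 */
        struct mock_tqp tqps[2] = { { .index = 8 }, { .index = 9 } };
        struct mock_queue *kinfo_tqp[2] = { &tqps[0].q, &tqps[1].q };

        printf("local 0 -> global %u\n", (unsigned)local_to_global(kinfo_tqp, 0)); /* 8 */
        printf("local 1 -> global %u\n", (unsigned)local_to_global(kinfo_tqp, 1)); /* 9 */
        return 0;
}
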
@@ -291,7 +291,7 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
 
         /* get the queue related info */
         memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
-        memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+        memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
         memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
         memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
 
@@ -333,11 +333,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
         struct hclge_mbx_vf_to_pf_cmd *req;
         struct hclge_vport *vport;
         struct hclge_desc *desc;
-        int ret;
+        int ret, flag;
 
+        flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
         /* handle all the mailbox requests in the queue */
-        while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
-                            HCLGE_CMDQ_RX_OUTVLD_B)) {
+        while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) {
                 desc = &crq->desc[crq->next_to_use];
                 req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
@@ -410,7 +410,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                                 req->msg[0]);
                         break;
                 }
+                crq->desc[crq->next_to_use].flag = 0;
                 hclge_mbx_ring_ptr_move_crq(crq);
+                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
         }
 
         /* Write back CMDQ_RQ header pointer, M7 need this pointer */
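
The mailbox hunks above follow one discipline: read the descriptor flag through le16_to_cpu() once per slot, test the valid bit on that CPU-order copy, clear the flag once the request has been handled so the slot cannot be processed twice, and re-read the flag of the next slot before looping. The sketch below models that loop on a mock ring; the descriptor layout, the valid-bit value and the helper names are assumptions made for illustration only.

/* Standalone sketch of the descriptor-flag discipline from the mailbox
 * handler: convert the little-endian flag once per descriptor, test the
 * valid bit, clear the flag after processing so the slot is not handled
 * twice, then re-read the flag of the next slot.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE     4
#define RX_OUTVLD_BIT 0x1u         /* "descriptor holds a valid request" */

struct mock_desc {
        uint16_t flag;              /* stored little-endian by hardware */
        uint16_t msg;
};

/* On a little-endian host this is the identity, like le16_to_cpu(). */
static uint16_t mock_le16_to_cpu(uint16_t v) { return v; }

static void handle_mbx(struct mock_desc *ring, unsigned int *next_to_use)
{
        uint16_t flag = mock_le16_to_cpu(ring[*next_to_use].flag);

        while (flag & RX_OUTVLD_BIT) {
                struct mock_desc *desc = &ring[*next_to_use];

                printf("handling request %u in slot %u\n",
                       (unsigned)desc->msg, *next_to_use);

                desc->flag = 0;                        /* don't process this slot again */
                *next_to_use = (*next_to_use + 1) % RING_SIZE;
                flag = mock_le16_to_cpu(ring[*next_to_use].flag);
        }
}

int main(void)
{
        struct mock_desc ring[RING_SIZE] = {
                { .flag = RX_OUTVLD_BIT, .msg = 10 },
                { .flag = RX_OUTVLD_BIT, .msg = 11 },
                { .flag = 0 },                         /* loop stops here */
                { .flag = 0 },
        };
        unsigned int next_to_use = 0;

        handle_mbx(ring, &next_to_use);
        printf("next_to_use is now %u\n", next_to_use); /* 2 */
        return 0;
}
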
@@ -1447,6 +1447,15 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
         ch->combined_count = hdev->num_tqps;
 }
 
+static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
+                                          u16 *free_tqps, u16 *max_rss_size)
+{
+        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+        *free_tqps = 0;
+        *max_rss_size = hdev->rss_size_max;
+}
+
 static const struct hnae3_ae_ops hclgevf_ops = {
         .init_ae_dev = hclgevf_init_ae_dev,
         .uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -1477,6 +1486,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
         .get_fw_version = hclgevf_get_fw_version,
         .set_vlan_filter = hclgevf_set_vlan_filter,
         .get_channels = hclgevf_get_channels,
+        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
 };
 
 static struct hnae3_ae_algo ae_algovf = {
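
On the VF side, the new callback is simply plugged into the hnae3_ae_ops table so that common code such as hns3_get_max_available_channels() can query spare TQPs and the RSS ceiling without caring whether the backend is the PF or the VF driver. Below is a minimal sketch of that function-pointer hook under mock types; mock_ops, mock_vf_dev and the sample value are illustrative, not the driver's definitions.

/* Minimal model of the ops-table hook: the common layer only sees a
 * table of function pointers; the VF backend reports zero spare TQPs
 * and its RSS ceiling.
 */
#include <stdint.h>
#include <stdio.h>

struct mock_ops {
        void (*get_tqps_and_rss_info)(void *priv, uint16_t *free_tqps,
                                      uint16_t *max_rss_size);
};

struct mock_vf_dev {
        uint16_t rss_size_max;
};

/* VF backend: no spare TQPs to hand out, RSS capped at rss_size_max. */
static void vf_get_tqps_and_rss_info(void *priv, uint16_t *free_tqps,
                                     uint16_t *max_rss_size)
{
        struct mock_vf_dev *hdev = priv;

        *free_tqps = 0;
        *max_rss_size = hdev->rss_size_max;
}

static const struct mock_ops vf_ops = {
        .get_tqps_and_rss_info = vf_get_tqps_and_rss_info,
};

int main(void)
{
        struct mock_vf_dev hdev = { .rss_size_max = 16 };
        uint16_t free_tqps, max_rss_size;

        /* common code path: call through the table, backend-agnostic */
        vf_ops.get_tqps_and_rss_info(&hdev, &free_tqps, &max_rss_size);
        printf("free_tqps=%u max_rss_size=%u\n",
               (unsigned)free_tqps, (unsigned)max_rss_size);
        return 0;
}
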
@@ -171,6 +171,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                                 req->msg[0]);
                         break;
                 }
+                crq->desc[crq->next_to_use].flag = 0;
                 hclge_mbx_ring_ptr_move_crq(crq);
                 flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
         }