Merge branch 'net-qed-qede-various-stability-fixes'

Alexander Lobakin says:

====================
net: qed/qede: various stability fixes

This set addresses several near-critical issues that were observed and
reproduced on different test and production configurations.

v2:
 - don't split the "Fixes:" tag across several lines in patch 9;
 - no functional changes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6199496be3
@@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
 		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 	}
 
-	iids->vf_cids += vf_cids * p_mngr->vf_count;
+	iids->vf_cids = vf_cids;
 	iids->tids += vf_tids * p_mngr->vf_count;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
 	return p_blk;
 }
 
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+	u32 cli_idx, blk_idx;
+
+	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+			clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+	}
+}
+
 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 
 	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
+	/* Reset all ILT blocks at the beginning of ILT computing in order
+	 * to prevent memory allocation for irrelevant blocks afterwards.
+	 */
+	qed_cxt_ilt_blk_reset(p_hwfn);
+
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
@@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev,
 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 	union qed_llh_filter filter = {};
-	u8 filter_idx, abs_ppfid;
+	u8 filter_idx, abs_ppfid = 0;
 	u32 high, low, ref_cnt;
 	int rc = 0;
 
@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
 
 void qed_resc_free(struct qed_dev *cdev)
 {
+	struct qed_rdma_info *rdma_info;
+	struct qed_hwfn *p_hwfn;
 	int i;
 
 	if (IS_VF(cdev)) {
@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
 	qed_llh_free(cdev);
 
 	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		p_hwfn = cdev->hwfns + i;
+		rdma_info = p_hwfn->p_rdma_info;
 
 		qed_cxt_mngr_free(p_hwfn);
 		qed_qm_info_free(p_hwfn);
@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
 			qed_ooo_free(p_hwfn);
 		}
 
-		if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+		if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+			qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
 			qed_rdma_info_free(p_hwfn);
+		}
 
 		qed_iov_free(p_hwfn);
 		qed_l2_free(p_hwfn);
@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
 	if (rc)
 		return rc;
 
-	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
 	return qed_iwarp_ll2_stop(p_hwfn);
 }
 
@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
 			break;
 		}
 	}
-	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
 	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
 }
 
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
+#define QED_VF_CHANNEL_USLEEP_DELAY		100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
+#define QED_VF_CHANNEL_MSLEEP_DELAY		25
+
 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 {
 	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
 	struct ustorm_trigger_vf_zone trigger;
 	struct ustorm_vf_zone *zone_data;
-	int rc = 0, time = 100;
+	int iter, rc = 0;
 
 	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
 
@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
 
 	/* When PF would be done with the response, it would write back to the
-	 * `done' address. Poll until then.
+	 * `done' address from a coherent DMA zone. Poll until then.
 	 */
-	while ((!*done) && time) {
-		msleep(25);
-		time--;
+
+	iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+	while (!*done && iter--) {
+		udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+		dma_rmb();
+	}
+
+	iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+	while (!*done && iter--) {
+		msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+		dma_rmb();
 	}
 
 	if (!*done) {
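As a side note on the constants introduced in the hunk above: the old loop could only sleep, in 25 ms steps, for up to 100 * 25 ms = 2.5 s, while the new scheme busy-polls briefly before falling back to sleeping. A minimal userspace sketch (plain C, illustrative only, not driver code) of the worst-case bounds implied by the new defines:

#include <stdio.h>

/* Constants copied from the hunk above; the comments are illustrative. */
#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
#define QED_VF_CHANNEL_USLEEP_DELAY		100	/* microseconds per udelay() */
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
#define QED_VF_CHANNEL_MSLEEP_DELAY		25	/* milliseconds per msleep() */

int main(void)
{
	unsigned int busy_us = QED_VF_CHANNEL_USLEEP_ITERATIONS *
			       QED_VF_CHANNEL_USLEEP_DELAY;
	unsigned int sleep_ms = QED_VF_CHANNEL_MSLEEP_ITERATIONS *
				QED_VF_CHANNEL_MSLEEP_DELAY;

	/* Prints: busy-poll up to 9000 us, then sleep up to 250 ms */
	printf("busy-poll up to %u us, then sleep up to %u ms\n",
	       busy_us, sleep_ms);
	return 0;
}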
@@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
 	/* PTP not supported on VFs */
 	if (!is_vf)
-		qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+		qede_ptp_enable(edev);
 
 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
|
@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
|
||||||
if (system_state == SYSTEM_POWER_OFF)
|
if (system_state == SYSTEM_POWER_OFF)
|
||||||
return;
|
return;
|
||||||
qed_ops->common->remove(cdev);
|
qed_ops->common->remove(cdev);
|
||||||
|
edev->cdev = NULL;
|
||||||
|
|
||||||
/* Since this can happen out-of-sync with other flows,
|
/* Since this can happen out-of-sync with other flows,
|
||||||
* don't release the netdevice until after slowpath stop
|
* don't release the netdevice until after slowpath stop
|
||||||
|
|
|
@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
 	if (ptp->tx_skb) {
 		dev_kfree_skb_any(ptp->tx_skb);
 		ptp->tx_skb = NULL;
+		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
 	}
 
 	/* Disable PTP in HW */
@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
 	edev->ptp = NULL;
 }
 
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
 {
 	struct qede_ptp *ptp;
 	int rc;
@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
 	/* Init work queue for Tx timestamping */
 	INIT_WORK(&ptp->work, qede_ptp_task);
 
-	/* Init cyclecounter and timecounter. This is done only in the first
-	 * load. If done in every load, PTP application will fail when doing
-	 * unload / load (e.g. MTU change) while it is running.
-	 */
-	if (init_tc) {
-		memset(&ptp->cc, 0, sizeof(ptp->cc));
-		ptp->cc.read = qede_ptp_read_cc;
-		ptp->cc.mask = CYCLECOUNTER_MASK(64);
-		ptp->cc.shift = 0;
-		ptp->cc.mult = 1;
-
-		timecounter_init(&ptp->tc, &ptp->cc,
-				 ktime_to_ns(ktime_get_real()));
-	}
-
-	return rc;
+	/* Init cyclecounter and timecounter */
+	memset(&ptp->cc, 0, sizeof(ptp->cc));
+	ptp->cc.read = qede_ptp_read_cc;
+	ptp->cc.mask = CYCLECOUNTER_MASK(64);
+	ptp->cc.shift = 0;
+	ptp->cc.mult = 1;
+
+	timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+	return 0;
 }
 
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
 {
 	struct qede_ptp *ptp;
 	int rc;
@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
 	edev->ptp = ptp;
 
-	rc = qede_ptp_init(edev, init_tc);
+	rc = qede_ptp_init(edev);
 	if (rc)
 		goto err1;
 
@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
 void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
 
 static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
|
@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
|
||||||
|
|
||||||
qede_rdma_cleanup_event(edev);
|
qede_rdma_cleanup_event(edev);
|
||||||
destroy_workqueue(edev->rdma_info.rdma_wq);
|
destroy_workqueue(edev->rdma_info.rdma_wq);
|
||||||
|
edev->rdma_info.rdma_wq = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
|
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
|
||||||
|
@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
 	if (edev->rdma_info.exp_recovery)
 		return;
 
-	if (!edev->rdma_info.qedr_dev)
+	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
 		return;
 
 	/* We don't want the cleanup flow to start while we're allocating and
@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
 
 static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
 {
+	u16 elem_per_page = p_chain->elem_per_page;
+	u32 prod = p_chain->u.chain16.prod_idx;
+	u32 cons = p_chain->u.chain16.cons_idx;
 	u16 used;
 
-	used = (u16) (((u32)0x10000 +
-		       (u32)p_chain->u.chain16.prod_idx) -
-		      (u32)p_chain->u.chain16.cons_idx);
+	if (prod < cons)
+		prod += (u32)U16_MAX + 1;
+
+	used = (u16)(prod - cons);
 	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
-		    p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+		used -= prod / elem_per_page - cons / elem_per_page;
 
 	return (u16)(p_chain->capacity - used);
 }
 
 static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
 {
+	u16 elem_per_page = p_chain->elem_per_page;
+	u64 prod = p_chain->u.chain32.prod_idx;
+	u64 cons = p_chain->u.chain32.cons_idx;
 	u32 used;
 
-	used = (u32) (((u64)0x100000000ULL +
-		       (u64)p_chain->u.chain32.prod_idx) -
-		      (u64)p_chain->u.chain32.cons_idx);
+	if (prod < cons)
+		prod += (u64)U32_MAX + 1;
+
+	used = (u32)(prod - cons);
 	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
-		    p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+		used -= (u32)(prod / elem_per_page - cons / elem_per_page);
 
 	return p_chain->capacity - used;
 }
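For readers skimming the last hunk: the rewritten helpers normalize the producer index into a wider type before subtracting, so the "used elements" count stays correct when the 16-bit (or 32-bit) index has wrapped. A standalone sketch of that arithmetic for the 16-bit case, using plain C types and hypothetical names instead of the qed_chain fields:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: prod16/cons16 mimic the chain16 producer/consumer
 * indices, capacity mimics p_chain->capacity.
 */
static uint16_t elem_left16(uint16_t prod16, uint16_t cons16, uint16_t capacity)
{
	uint32_t prod = prod16;
	uint32_t cons = cons16;
	uint16_t used;

	/* If the producer index has wrapped past the consumer, widen it by
	 * one full 16-bit period so the subtraction gives the real distance.
	 */
	if (prod < cons)
		prod += (uint32_t)UINT16_MAX + 1;

	used = (uint16_t)(prod - cons);

	return capacity - used;
}

int main(void)
{
	/* Wrapped case: prod = 5, cons = 65530 -> 11 elements in use,
	 * so 4096 - 11 = 4085 are left.
	 */
	printf("left = %u\n", (unsigned int)elem_left16(5, 65530, 4096));
	return 0;
}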