Merge branch 'qed-Bug-fixes'
Manish Chopra says:

====================
qed: Bug fixes

This series has SR-IOV and some general fixes.
Please consider applying it to "net".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit bfe2599dd2
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
 
 /* get pq index according to PQ_FLAGS */
 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
-					    u32 pq_flags)
+					    unsigned long pq_flags)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (bitmap_weight((unsigned long *)&pq_flags,
+	if (bitmap_weight(&pq_flags,
 			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
-		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
 		goto err;
 	}
 
 	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
-		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
 		goto err;
 	}
 
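bitmap_weight() walks unsigned long words, so handing it the address of a 4-byte u32 through a cast makes it read a full word, 8 bytes on a 64-bit kernel, past the end of the variable; widening pq_flags to unsigned long keeps that read in bounds and also fixes the DP_ERR format specifiers. A minimal userspace sketch of the widen-then-count pattern the hunk adopts (assumes GCC/Clang builtins and an LP64 target; not driver code):

/* Not part of the patch: widen the 32-bit value into an unsigned long
 * and count bits in that copy, so the whole-word read stays in bounds.
 */
#include <stdio.h>

int main(void)
{
	unsigned int raw_flags = 0x5;		/* two flag bits set */
	unsigned long pq_flags = raw_flags;	/* widened copy */

	if (__builtin_popcountl(pq_flags) > 1)
		printf("requested multiple pq flags 0x%lx\n", pq_flags);

	return 0;
}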
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
 
+		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
 			  !!(accept_filter & QED_ACCEPT_BCAST));
 
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 		return rc;
 	}
 
+	if (p_params->update_ctl_frame_check) {
+		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+	}
+
 	/* Update mcast bins for VFs, PF doesn't use this functionality */
 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
@@ -2688,7 +2697,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
 						 QED_ACCEPT_MCAST_UNMATCHED;
-		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+						 QED_ACCEPT_MCAST_UNMATCHED;
 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
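Together with the qed_sp_update_accept_mode() hunk above, this makes TX promiscuous mode symmetric with RX: promiscuous mode now also requests unmatched unicast on the transmit filter, and when both matched and unmatched unicast are requested the vport TX mode is programmed with ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, mirroring the existing multicast handling. A tiny illustrative sketch of that decision (the flag values below are placeholders, not the driver's QED_ACCEPT_* definitions):

/* Illustrative only; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define ACCEPT_UCAST_MATCHED	0x1
#define ACCEPT_UCAST_UNMATCHED	0x2

static bool tx_ucast_accept_all(unsigned int accept_filter)
{
	/* Accept-all is programmed only when both matched and unmatched
	 * unicast are requested, i.e. true promiscuous transmit.
	 */
	return (accept_filter & ACCEPT_UCAST_MATCHED) &&
	       (accept_filter & ACCEPT_UCAST_UNMATCHED);
}

int main(void)
{
	unsigned int promisc = ACCEPT_UCAST_MATCHED | ACCEPT_UCAST_UNMATCHED;

	printf("ucast accept-all: %d\n", tx_ucast_accept_all(promisc));
	return 0;
}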
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
 	struct qed_rss_params *rss_params;
 	struct qed_filter_accept_flags accept_flags;
 	struct qed_sge_tpa_params *sge_tpa_params;
+	u8 update_ctl_frame_check;
+	u8 mac_chk_en;
+	u8 ethtype_chk_en;
 };
 
 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
@@ -2451,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 {
 	struct qed_ll2_tx_pkt_info pkt;
 	const skb_frag_t *frag;
+	u8 flags = 0, nr_frags;
 	int rc = -EINVAL, i;
 	dma_addr_t mapping;
 	u16 vlan = 0;
-	u8 flags = 0;
 
 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
 		return -EINVAL;
 	}
 
-	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+	/* Cache number of fragments from SKB since SKB may be freed by
+	 * the completion routine after calling qed_ll2_prepare_tx_packet()
+	 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
-		       1 + skb_shinfo(skb)->nr_frags);
+		       1 + nr_frags);
 		return -EINVAL;
 	}
 
@@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	}
 
 	memset(&pkt, 0, sizeof(pkt));
-	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+	pkt.num_of_bds = 1 + nr_frags;
 	pkt.vlan = vlan;
 	pkt.bd_flags = flags;
 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
 		pkt.remove_stag = true;
 
+	/* qed_ll2_prepare_tx_packet() may actually send the packet if
+	 * there are no fragments in the skb and subsequently the completion
+	 * routine may run and free the SKB, so no dereferencing the SKB
+	 * beyond this point unless skb has any fragments.
+	 */
 	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
 				       &pkt, 1);
 	if (rc)
 		goto err;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
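The three qed_ll2_start_xmit() hunks above close a use-after-free window: when the skb has no fragments, qed_ll2_prepare_tx_packet() can post the only BD and the completion routine may free the skb before qed_ll2_start_xmit() returns, so the fragment count is cached up front and the skb is only dereferenced afterwards when fragments remain to be mapped. A generic, illustrative sketch of that ownership rule (plain userspace C, invented names, not driver code):

/* Not driver code; names invented for illustration. */
#include <stdio.h>
#include <stdlib.h>

struct tx_buf {
	int nr_frags;
};

/* Hypothetical send routine: a buffer with no fragments is consumed
 * (freed) right away, mimicking a completion that can run before the
 * caller gets control back.
 */
static int post_first_bd(struct tx_buf *b)
{
	if (b->nr_frags == 0)
		free(b);
	return 0;
}

static int xmit(struct tx_buf *b)
{
	int nr_frags = b->nr_frags;	/* cache before ownership may pass */
	int rc = post_first_bd(b);

	if (rc)
		return rc;

	/* From here on, touch @b only if fragments remain to be posted;
	 * otherwise it may already have been freed above.
	 */
	for (int i = 0; i < nr_frags; i++)
		printf("posting fragment %d of %d\n", i + 1, nr_frags);

	return 0;
}

int main(void)
{
	struct tx_buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->nr_frags = 0;
	return xmit(b);
}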
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	params.vport_id = vf->vport_id;
 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 	params.mtu = vf->mtu;
-	params.check_mac = true;
+
+	/* Non trusted VFs should enable control frame filtering */
+	params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 		params.opaque_fid = vf->opaque_fid;
 		params.vport_id = vf->vport_id;
 
+		params.update_ctl_frame_check = 1;
+		params.mac_chk_en = !vf_info->is_trusted_configured;
+
 		if (vf_info->rx_accept_mode & mask) {
 			flags->update_rx_mode_config = 1;
 			flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 		}
 
 		if (flags->update_rx_mode_config ||
-		    flags->update_tx_mode_config)
+		    flags->update_tx_mode_config ||
+		    params.update_ctl_frame_check)
 			qed_sp_vport_update(hwfn, &params,
 					    QED_SPQ_MODE_EBLOCK, NULL);
 	}
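The SR-IOV hunks above tie a VF's trust state to control-frame filtering: the new update_ctl_frame_check / mac_chk_en / ethtype_chk_en fields are copied into the vport-update ramrod, vport start enables check_mac only for VFs that are not configured as trusted, and a trust change now issues a vport update even when the rx/tx accept modes did not change. A cut-down sketch of the caller-side pattern (simplified stand-in types, not the driver's structures):

/* Simplified stand-ins for the driver structures; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct vport_update {
	unsigned char update_ctl_frame_check;
	unsigned char mac_chk_en;
};

static void prepare_trust_update(struct vport_update *p, bool is_trusted)
{
	/* Untrusted VFs get control-frame MAC checking enforced;
	 * trusted VFs have the check relaxed.
	 */
	p->update_ctl_frame_check = 1;
	p->mac_chk_en = !is_trusted;
}

int main(void)
{
	struct vport_update upd = { 0 };

	prepare_trust_update(&upd, false);
	printf("update_ctl_frame_check=%u mac_chk_en=%u\n",
	       upd.update_ctl_frame_check, upd.mac_chk_en);
	return 0;
}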
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 	struct vf_pf_resc_request *p_resc;
+	u8 retry_cnt = VF_ACQUIRE_THRESH;
 	bool resources_acquired = false;
 	struct vfpf_acquire_tlv *req;
 	int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 
 		/* send acquire request */
 		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+		/* Re-try acquire in case of vf-pf hw channel timeout */
+		if (retry_cnt && rc == -EBUSY) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF retrying to acquire due to VPC timeout\n");
+			retry_cnt--;
+			continue;
+		}
+
 		if (rc)
 			goto exit;
 
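An -EBUSY return from qed_send_msg2pf() is treated as a transient VF-PF channel timeout, and the ACQUIRE request is re-sent up to VF_ACQUIRE_THRESH times via the new retry_cnt counter before the error is propagated. A minimal sketch of that bounded-retry shape (userspace, illustrative names only):

/* Userspace sketch with invented names; ACQUIRE_RETRIES stands in for
 * the driver's VF_ACQUIRE_THRESH budget.
 */
#include <errno.h>
#include <stdio.h>

#define ACQUIRE_RETRIES	3

/* Hypothetical mailbox send; pretend the first attempt times out. */
static int send_acquire(int attempt)
{
	return attempt == 0 ? -EBUSY : 0;
}

int main(void)
{
	int retry_cnt = ACQUIRE_RETRIES;
	int attempt = 0, rc;

	for (;;) {
		rc = send_acquire(attempt++);

		/* Transient channel timeout: retry a bounded number of times */
		if (rc == -EBUSY && retry_cnt) {
			retry_cnt--;
			continue;
		}
		if (rc)
			return 1;	/* hard failure */
		break;
	}

	printf("acquired after %d attempt(s)\n", attempt);
	return 0;
}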