IB/hfi1: Use a common pad buffer for 9B and 16B packets

There is no reason for a different pad buffer for the two
packet types.

Expand the current buffer allocation to allow for both
packet types.
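For a sense of the sizes involved: the old dedicated pad buffer was sizeof(u32) (4 bytes), while the 16B trailer (padding, LT byte, and ICRC) can need up to MAX_16B_PADDING bytes, so the shared buffer is simply sized to the larger of the two (the SDMA_PAD macro in the diff below). The following is a minimal, illustrative sketch of that sizing choice; the MAX_16B_PADDING value and the helper name are stand-ins, not the driver's real definitions.

#include <stddef.h>
#include <stdio.h>

/* Stand-in value for illustration; the real constant lives in the hfi1 headers. */
#define MAX_16B_PADDING 12

/*
 * One pad buffer sized for the worst case of both packet formats:
 * the 9B path previously used a 4-byte (sizeof(u32)) buffer, the 16B
 * path a MAX_16B_PADDING-byte trailing buffer.
 */
static size_t common_pad_size(void)
{
	size_t pad_9b = sizeof(unsigned int);	/* the old 4-byte buffer */
	size_t pad_16b = MAX_16B_PADDING;	/* worst-case 16B trailer */

	return pad_16b > pad_9b ? pad_16b : pad_9b;
}

int main(void)
{
	printf("common pad buffer: %zu bytes\n", common_pad_size());
	return 0;
}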

Fixes: f8195f3b14 ("IB/hfi1: Eliminate allocation while atomic")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Kaike Wan <kaike.wan@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Link: https://lore.kernel.org/r/20191004204934.26838.13099.stgit@awfm-01.aw.intel.com
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author:    Mike Marciniszyn, 2019-10-04 16:49:34 -04:00
Committer: Doug Ledford
Parent:    9ed5bd7d22
Commit:    22bb136534
2 changed files with 7 additions and 8 deletions

drivers/infiniband/hw/hfi1/sdma.c

@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
 	struct sdma_engine *sde;
 
 	if (dd->sdma_pad_dma) {
-		dma_free_coherent(&dd->pcidev->dev, 4,
+		dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
 				  (void *)dd->sdma_pad_dma,
 				  dd->sdma_pad_phys);
 		dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 	}
 
 	/* Allocate memory for pad */
-	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
 					      &dd->sdma_pad_phys, GFP_KERNEL);
 	if (!dd->sdma_pad_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
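For readers less familiar with the coherent-DMA API these hunks touch, here is a minimal sketch of the allocate/free pairing: dma_alloc_coherent() and dma_free_coherent() are the real kernel API, but struct pad_buf, the helper names, and the MAX_16B_PADDING stand-in value are illustrative simplifications of what the driver actually keeps in struct hfi1_devdata.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Illustrative stand-in; the real value comes from the hfi1 headers. */
#define MAX_16B_PADDING 12

/* Mirrors the SDMA_PAD macro added in sdma.c above. */
#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))

/* Simplified stand-in for the pad-buffer fields of struct hfi1_devdata. */
struct pad_buf {
	void		*cpu_addr;	/* kernel virtual address, used by the PIO copy path */
	dma_addr_t	dma_addr;	/* bus address handed to the SDMA engine */
};

static int pad_buf_alloc(struct pci_dev *pdev, struct pad_buf *pad)
{
	/* One coherent buffer large enough for both 9B and 16B padding. */
	pad->cpu_addr = dma_alloc_coherent(&pdev->dev, SDMA_PAD,
					   &pad->dma_addr, GFP_KERNEL);
	return pad->cpu_addr ? 0 : -ENOMEM;
}

static void pad_buf_free(struct pci_dev *pdev, struct pad_buf *pad)
{
	if (pad->cpu_addr) {
		/* The size must match the allocation, hence SDMA_PAD here too. */
		dma_free_coherent(&pdev->dev, SDMA_PAD,
				  pad->cpu_addr, pad->dma_addr);
		pad->cpu_addr = NULL;
	}
}

The property the patch relies on is that a single dma_alloc_coherent() call yields both views of the same memory: the CPU-visible pointer (sdma_pad_dma) and the bus address (sdma_pad_phys), which is what lets the verbs.c hunks below feed either send path from one buffer.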

drivers/infiniband/hw/hfi1/verbs.c

@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
 	/* add icrc, lt byte, and padding to flit */
 	if (extra_bytes)
-		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-					(void *)trail_buf, extra_bytes);
+		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+				       sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
 	return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 		}
 		/* add icrc, lt byte, and padding to flit */
 		if (extra_bytes)
-			seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+			seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+					 extra_bytes);
 		seg_pio_copy_end(pbuf);
 	}
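Finally, a condensed, purely illustrative sketch of how the two send paths consume that shared buffer after this patch: the SDMA path passes the engine the buffer's existing bus address (the switch from sdma_txadd_kvaddr() to sdma_txadd_daddr() above), while the PIO path copies the trailing bytes from the kernel virtual address. struct pad_buf repeats the stand-in from the earlier sketch, and queue_sdma_descriptor()/pio_copy() are hypothetical stubs for sdma_txadd_daddr() and seg_pio_copy_mid().

#include <linux/string.h>
#include <linux/types.h>

/* Same simplified stand-in as in the allocation sketch. */
struct pad_buf {
	void		*cpu_addr;	/* kernel virtual address */
	dma_addr_t	dma_addr;	/* bus address */
};

/* Hypothetical stub: a real SDMA descriptor would reference addr directly. */
static int queue_sdma_descriptor(dma_addr_t addr, u16 len)
{
	return 0;
}

/* Hypothetical stub for the PIO copy into the send buffer. */
static void pio_copy(void *dst, const void *src, u16 len)
{
	memcpy(dst, src, len);
}

/* SDMA send path: the descriptor takes the pre-existing bus address. */
static int pad_via_sdma(struct pad_buf *pad, u16 extra_bytes)
{
	if (!extra_bytes)
		return 0;
	return queue_sdma_descriptor(pad->dma_addr, extra_bytes);
}

/* PIO send path: the CPU copies the trailing bytes from the shared buffer. */
static void pad_via_pio(struct pad_buf *pad, void *pio_dst, u16 extra_bytes)
{
	if (extra_bytes)
		pio_copy(pio_dst, pad->cpu_addr, extra_bytes);
}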