staging/rdma/hfi1: don't cache "prescan head"
When HFI1_CAP_DMA_RTAIL is toggled off, the cached "prescan head" can get out of sync with the receive context's "head". This happens when newly arrived packets are received and processed by an RX interrupt handler after prescan_rxq() has run. The race is unavoidable, so rather than trying to keep a cached copy in sync, always start prescanning at the current "rcd->head" entry.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Arthur Kepner <arthur.kepner@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
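For orientation, here is a toy model of the race described above (a sketch only, not driver code; ENTRIES, rx_interrupt() and both head variables are made up for illustration). An RX interrupt handler advances the real head between prescan passes, so a prescan position cached across passes goes stale, while one re-read from the current head each time does not:

/*
 * Toy model of the race: the receive header queue is a ring of ENTRIES
 * slots.  The RX interrupt handler advances the real head as it consumes
 * packets.  A prescanner that caches its own head across calls drifts once
 * packets are consumed between prescans; one that restarts from the
 * current head stays in sync.  All names here are illustrative.
 */
#include <stdio.h>

#define ENTRIES 8

static unsigned int real_head;    /* head owned by the RX interrupt handler */
static unsigned int cached_head;  /* stale copy kept by a caching prescanner */

static void rx_interrupt(unsigned int npkts)
{
	/* packets received and processed; the real head moves on */
	real_head = (real_head + npkts) % ENTRIES;
}

int main(void)
{
	/* first prescan: both schemes start at the real head */
	cached_head = real_head;
	printf("prescan 1 starts at %u (cached) / %u (fresh)\n",
	       cached_head, real_head);

	/* packets arrive after prescan_rxq() and are handled by the
	 * interrupt handler -- the situation the commit describes */
	rx_interrupt(3);

	/* second prescan: the cached copy is now out of sync */
	printf("prescan 2 starts at %u (cached) / %u (fresh)\n",
	       cached_head, real_head);
	return 0;
}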
commit 3e7ccca08d
parent 701e441d82
@@ -509,14 +509,10 @@ static inline void init_ps_mdata(struct ps_mdata *mdata,
 	mdata->rsize = packet->rsize;
 	mdata->maxcnt = packet->maxcnt;
 
-	if (rcd->ps_state.initialized == 0) {
-		mdata->ps_head = packet->rhqoff;
-		rcd->ps_state.initialized++;
-	} else
-		mdata->ps_head = rcd->ps_state.ps_head;
+	mdata->ps_head = packet->rhqoff;
 
 	if (HFI1_CAP_IS_KSET(DMA_RTAIL)) {
-		mdata->ps_tail = packet->hdrqtail;
+		mdata->ps_tail = get_rcvhdrtail(rcd);
 		mdata->ps_seq = 0; /* not used with DMA_RTAIL */
 	} else {
 		mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
@@ -533,12 +529,9 @@ static inline int ps_done(struct ps_mdata *mdata, u64 rhf)
 
 static inline void update_ps_mdata(struct ps_mdata *mdata)
 {
-	struct hfi1_ctxtdata *rcd = mdata->rcd;
-
 	mdata->ps_head += mdata->rsize;
-	if (mdata->ps_head > mdata->maxcnt)
+	if (mdata->ps_head >= mdata->maxcnt)
 		mdata->ps_head = 0;
-	rcd->ps_state.ps_head = mdata->ps_head;
 	if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
 		if (++mdata->ps_seq > 13)
 			mdata->ps_seq = 1;
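A side note on the comparison change in update_ps_mdata() above (an observation, not part of the commit text): ps_head advances in rsize-sized steps, so assuming valid entry offsets run from 0 to maxcnt - rsize, the old ">" test let ps_head sit at maxcnt, one full entry past the end, for a pass, whereas ">=" wraps it back to 0 immediately. A minimal standalone illustration with made-up sizes (rsize = 32, a 4-entry queue):

#include <stdio.h>

/* Hypothetical values for illustration: a header queue of 4 entries,
 * each rsize bytes long, so maxcnt = 4 * rsize. */
int main(void)
{
	unsigned int rsize = 32, maxcnt = 4 * 32;
	unsigned int head_ge = 0, head_gt = 0;

	for (int i = 0; i < 5; i++) {
		/* new check: wrap as soon as the head reaches maxcnt */
		head_ge += rsize;
		if (head_ge >= maxcnt)
			head_ge = 0;

		/* old check: only wrap once the head exceeds maxcnt */
		head_gt += rsize;
		if (head_gt > maxcnt)
			head_gt = 0;

		printf("step %d: >= gives %3u, > gives %3u\n",
		       i + 1, head_ge, head_gt);
	}
	return 0;
}

At step 4 the ">=" variant wraps to 0 while the ">" variant reports 128, an offset past the last valid entry (96).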
@@ -139,15 +139,6 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
 struct hfi1_opcode_stats_perctx;
 #endif
 
-/*
- * struct ps_state keeps state associated with RX queue "prescanning"
- * (prescanning for FECNs, and BECNs), if prescanning is in use.
- */
-struct ps_state {
-	u32 ps_head;
-	int initialized;
-};
-
 struct ctxt_eager_bufs {
 	ssize_t size;            /* total size of eager buffers */
 	u32 count;               /* size of buffers array */
@@ -302,10 +293,6 @@ struct hfi1_ctxtdata {
 	struct list_head sdma_queues;
 	spinlock_t sdma_qlock;
 
-#ifdef CONFIG_PRESCAN_RXQ
-	struct ps_state ps_state;
-#endif /* CONFIG_PRESCAN_RXQ */
-
 	/*
 	 * The interrupt handler for a particular receive context can vary
 	 * throughout it's lifetime. This is not a lock protected data member so