RDMA 5.10 fourth rc pull request
Two notable security issues and a collection of minor fixes:

 - Significant out of bounds access security issue in i40iw
 - Fix misuse of mmu notifiers in hfi1
 - Several errors in the register map/usage in hns
 - Missing error returns in mthca

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl/BAZMACgkQOG33FX4g
mxptaw/9EGojgoR0WBTjpokfbM9DtTMUuy2fq0r2y+sTA21nMk7g83tPQi8tDyiD
e1oMEE1+u8yeVDSoWnZ3zUu/hBGIlMA89DK7dQ48JGnGhvFMuMlfpv/VCHCtDv8i
cFiwbHfEnxx8lX12vMk3O/Vh076/AEtCW6uXmX7l+0Y8fYBpBteifByRs0Ik7dz8
7HSs9c7Y6GXcu3zbGml9nqYgMWHoYBUsXIQKhAdi9C75O4VbRdXgrRO1Wpwc8gdu
jdxL3r3P4wDgfzF/XR3VGIZlahQA4K0G3yae8nxjBac6+aV/02LBzWkweQOhMWoK
E+XwMdzDmGO4sVzU+EA3Se5coTlQywiPIl+o4I2AneQwLlr9gDjlMuV1JqbIRn56
zPiikVps3uCSXlSodakSd9phI6bg1YlRlVYbfduPtE9x1weXXbkm24yXrX9WAmh+
Y9ipAI9NKvRFra6ZfAetcROp2IxRK/5leX/Agbqy93xDN9XRC+7MLWqvRqLZkMpc
tEBwmNDf+i5nGiz9MyW1lgj6lAFghBj0Ojxv5AVxYi6iP3YR5AUzLecXlRhWWQzm
lkqXCUcNGidZpOBcQlEg87LZ/oDYg2N5DpKn69yVYi+hL2u/aeixZfMRCiqEGO/g
5MqYiO983sod9zccEhnjX0pp5JFkJEqWQ9IGNLmRjYmbcnGx1O4=
=CKEA
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Two security issues and several small bug fixes. Things seem to have
  stabilized for this release here.

  Summary:

   - Significant out of bounds access security issue in i40iw

   - Fix misuse of mmu notifiers in hfi1

   - Several errors in the register map/usage in hns

   - Missing error returns in mthca"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Bugfix for memory window mtpt configuration
  RDMA/hns: Fix retry_cnt and rnr_cnt when querying QP
  RDMA/hns: Fix wrong field of SRQ number the device supports
  IB/hfi1: Ensure correct mm is used at all times
  RDMA/i40iw: Address an mmap handler exploit in i40iw
  IB/mthca: fix return value of error branch in mthca_init_cq()
commit d41e9b22eb
@@ -1,4 +1,5 @@
 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2015-2020 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 	spin_lock_init(&fd->tid_lock);
 	spin_lock_init(&fd->invalid_lock);
 	fd->rec_cpu_num = -1; /* no cpu affinity by default */
-	fd->mm = current->mm;
-	mmgrab(fd->mm);
 	fd->dd = dd;
 	fp->private_data = fd;
 	return 0;
@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 		deallocate_ctxt(uctxt);
 done:
-	mmdrop(fdata->mm);
 
 	if (atomic_dec_and_test(&dd->user_refcount))
 		complete(&dd->user_comp);
@@ -1,6 +1,7 @@
 #ifndef _HFI1_KERNEL_H
 #define _HFI1_KERNEL_H
 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2015-2020 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
 	u32 invalid_tid_idx;
 	/* protect invalid_tids array and invalid_tid_idx */
 	spinlock_t invalid_lock;
-	struct mm_struct *mm;
 };
 
 extern struct xarray hfi1_dev_table;
@@ -1,4 +1,5 @@
 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2016 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -48,23 +49,11 @@
 #include <linux/rculist.h>
 #include <linux/mmu_notifier.h>
 #include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
 
 #include "mmu_rb.h"
 #include "trace.h"
 
-struct mmu_rb_handler {
-	struct mmu_notifier mn;
-	struct rb_root_cached root;
-	void *ops_arg;
-	spinlock_t lock; /* protect the RB tree */
-	struct mmu_rb_ops *ops;
-	struct mm_struct *mm;
-	struct list_head lru_list;
-	struct work_struct del_work;
-	struct list_head del_list;
-	struct workqueue_struct *wq;
-};
-
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
 static int mmu_notifier_range_start(struct mmu_notifier *,
@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
 	return PAGE_ALIGN(node->addr + node->len) - 1;
 }
 
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+int hfi1_mmu_rb_register(void *ops_arg,
 			 struct mmu_rb_ops *ops,
 			 struct workqueue_struct *wq,
 			 struct mmu_rb_handler **handler)
 {
-	struct mmu_rb_handler *handlr;
+	struct mmu_rb_handler *h;
 	int ret;
 
-	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
-	if (!handlr)
+	h = kmalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
 		return -ENOMEM;
 
-	handlr->root = RB_ROOT_CACHED;
-	handlr->ops = ops;
-	handlr->ops_arg = ops_arg;
-	INIT_HLIST_NODE(&handlr->mn.hlist);
-	spin_lock_init(&handlr->lock);
-	handlr->mn.ops = &mn_opts;
-	handlr->mm = mm;
-	INIT_WORK(&handlr->del_work, handle_remove);
-	INIT_LIST_HEAD(&handlr->del_list);
-	INIT_LIST_HEAD(&handlr->lru_list);
-	handlr->wq = wq;
+	h->root = RB_ROOT_CACHED;
+	h->ops = ops;
+	h->ops_arg = ops_arg;
+	INIT_HLIST_NODE(&h->mn.hlist);
+	spin_lock_init(&h->lock);
+	h->mn.ops = &mn_opts;
+	INIT_WORK(&h->del_work, handle_remove);
+	INIT_LIST_HEAD(&h->del_list);
+	INIT_LIST_HEAD(&h->lru_list);
+	h->wq = wq;
 
-	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+	ret = mmu_notifier_register(&h->mn, current->mm);
 	if (ret) {
-		kfree(handlr);
+		kfree(h);
 		return ret;
 	}
 
-	*handler = handlr;
+	*handler = h;
 	return 0;
 }
 
@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 	struct list_head del_list;
 
 	/* Unregister first so we don't get any more notifications. */
-	mmu_notifier_unregister(&handler->mn, handler->mm);
+	mmu_notifier_unregister(&handler->mn, handler->mn.mm);
 
 	/*
 	 * Make sure the wq delete handler is finished running. It will not
@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 	int ret = 0;
 
 	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
+
+	if (current->mm != handler->mn.mm)
+		return -EPERM;
+
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
 	if (node) {
@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 		__mmu_int_rb_remove(mnode, &handler->root);
 		list_del(&mnode->list); /* remove from LRU list */
 	}
+	mnode->handler = handler;
 unlock:
 	spin_unlock_irqrestore(&handler->lock, flags);
 	return ret;
@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
 	unsigned long flags;
 	bool ret = false;
 
+	if (current->mm != handler->mn.mm)
+		return ret;
+
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, addr, len);
 	if (node) {
@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	unsigned long flags;
 	bool stop = false;
 
+	if (current->mm != handler->mn.mm)
+		return;
+
 	INIT_LIST_HEAD(&del_list);
 
 	spin_lock_irqsave(&handler->lock, flags);
@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
 {
 	unsigned long flags;
 
+	if (current->mm != handler->mn.mm)
+		return;
+
 	/* Validity of handler and node pointers has been checked by caller. */
 	trace_hfi1_mmu_rb_remove(node->addr, node->len);
 	spin_lock_irqsave(&handler->lock, flags);
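The hunks above drop the handler's cached mm pointer and instead rely on the mm that mmu_notifier_register() records in handler->mn.mm, with every entry point refusing to run on behalf of a different address space. An illustrative sketch of that guard follows; the names my_handler and my_cache_insert are hypothetical, not the hfi1 code:

#include <linux/errno.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>	/* for current */

struct my_handler {
	struct mmu_notifier mn;	/* mn.mm is filled in by mmu_notifier_register() */
	/* ... interval tree, lock, ops ... */
};

static int my_cache_insert(struct my_handler *h)
{
	/*
	 * Only the task that registered the notifier may touch the cache;
	 * a different current->mm means a different address space, so any
	 * cached user mappings would be stale or belong to someone else.
	 */
	if (current->mm != h->mn.mm)
		return -EPERM;

	/* ... take h's lock and insert into the tree ... */
	return 0;
}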
@@ -1,4 +1,5 @@
 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2016 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -54,6 +55,7 @@ struct mmu_rb_node {
 	unsigned long len;
 	unsigned long __last;
 	struct rb_node node;
+	struct mmu_rb_handler *handler;
 	struct list_head list;
 };
 
@@ -71,7 +73,19 @@ struct mmu_rb_ops {
 		     void *evict_arg, bool *stop);
 };
 
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+struct mmu_rb_handler {
+	struct mmu_notifier mn;
+	struct rb_root_cached root;
+	void *ops_arg;
+	spinlock_t lock; /* protect the RB tree */
+	struct mmu_rb_ops *ops;
+	struct list_head lru_list;
+	struct work_struct del_work;
+	struct list_head del_list;
+	struct workqueue_struct *wq;
+};
+
+int hfi1_mmu_rb_register(void *ops_arg,
 			 struct mmu_rb_ops *ops,
 			 struct workqueue_struct *wq,
 			 struct mmu_rb_handler **handler);
@@ -1,4 +1,5 @@
 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2015-2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
 {
 	struct page **pages;
 	struct hfi1_devdata *dd = fd->uctxt->dd;
+	struct mm_struct *mm;
 
 	if (mapped) {
 		pci_unmap_single(dd->pcidev, node->dma_addr,
 				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
 		pages = &node->pages[idx];
+		mm = mm_from_tid_node(node);
 	} else {
 		pages = &tidbuf->pages[idx];
+		mm = current->mm;
 	}
-	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
+	hfi1_release_user_pages(mm, pages, npages, mapped);
 	fd->tid_n_pinned -= npages;
 }
 
@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 	 * pages, accept the amount pinned so far and program only that.
 	 * User space knows how to deal with partially programmed buffers.
 	 */
-	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
+	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
 		kfree(pages);
 		return -ENOMEM;
 	}
 
-	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
 	if (pinned <= 0) {
 		kfree(pages);
 		return pinned;
@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
 
 	if (fd->use_mn) {
 		ret = mmu_interval_notifier_insert(
-			&node->notifier, fd->mm,
+			&node->notifier, current->mm,
 			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
 			&tid_mn_ops);
 		if (ret)
@@ -1,6 +1,7 @@
 #ifndef _HFI1_USER_EXP_RCV_H
 #define _HFI1_USER_EXP_RCV_H
 /*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
  * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
 int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
 			      struct hfi1_tid_info *tinfo);
 
+static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
+{
+	return node->notifier.mm;
+}
+
 #endif /* _HFI1_USER_EXP_RCV_H */
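mm_from_tid_node() works because mmu_interval_notifier_insert() stores the mm it was registered against inside the notifier, so teardown paths can recover the owning address space without the old cached fd->mm pointer. A minimal sketch of the same relationship, using a hypothetical my_node type rather than the hfi1 structures:

#include <linux/mmu_notifier.h>

struct my_node {
	struct mmu_interval_notifier notifier;
	/* ... pinned pages, DMA address ... */
};

/* Valid once mmu_interval_notifier_insert(&node->notifier, mm, ...) succeeds. */
static inline struct mm_struct *my_node_mm(struct my_node *node)
{
	return node->notifier.mm;
}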
@@ -1,4 +1,5 @@
 /*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
  * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
 	atomic_set(&pq->n_reqs, 0);
 	init_waitqueue_head(&pq->wait);
 	atomic_set(&pq->n_locked, 0);
-	pq->mm = fd->mm;
 
 	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
 		    activate_packet_queue, NULL, NULL);
@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
 
 	cq->nentries = hfi1_sdma_comp_ring_size;
 
-	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
 	if (ret) {
 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
 
 	npages -= node->npages;
 retry:
-	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+	if (!hfi1_can_pin_pages(pq->dd, current->mm,
 				atomic_read(&pq->n_locked), npages)) {
 		cleared = sdma_cache_evict(pq, npages);
 		if (cleared >= npages)
 			goto retry;
 	}
-	pinned = hfi1_acquire_user_pages(pq->mm,
+	pinned = hfi1_acquire_user_pages(current->mm,
 		((unsigned long)iovec->iov.iov_base +
 		 (node->npages * PAGE_SIZE)), npages, 0,
 		pages + node->npages);
@@ -995,7 +995,7 @@ retry:
 		return pinned;
 	}
 	if (pinned != npages) {
-		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+		unpin_vector_pages(current->mm, pages, node->npages, pinned);
 		return -EFAULT;
 	}
 	kfree(node->pages);
@@ -1008,7 +1008,8 @@ retry:
 static void unpin_sdma_pages(struct sdma_mmu_node *node)
 {
 	if (node->npages) {
-		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
+		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+				   node->npages);
 		atomic_sub(node->npages, &node->pq->n_locked);
 	}
 }
@@ -1,6 +1,7 @@
 #ifndef _HFI1_USER_SDMA_H
 #define _HFI1_USER_SDMA_H
 /*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
  * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
 	unsigned long unpinned;
 	struct mmu_rb_handler *handler;
 	atomic_t n_locked;
-	struct mm_struct *mm;
 };
 
 struct hfi1_user_sdma_comp_q {
@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 			   struct iovec *iovec, unsigned long dim,
 			   unsigned long *count);
 
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+	return node->rb.handler->mn.mm;
+}
+
 #endif /* _HFI1_USER_SDMA_H */
@@ -2936,6 +2936,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
 
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
@@ -4989,11 +4990,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
					    V2_QPC_BYTE_28_AT_M,
					    V2_QPC_BYTE_28_AT_S);
 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
-					    V2_QPC_BYTE_212_RETRY_CNT_M,
-					    V2_QPC_BYTE_212_RETRY_CNT_S);
+					    V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+					    V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
 	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
-					    V2_QPC_BYTE_244_RNR_CNT_M,
-					    V2_QPC_BYTE_244_RNR_CNT_S);
+					    V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+					    V2_QPC_BYTE_244_RNR_NUM_INIT_S);
 
 done:
 	qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -1661,7 +1661,7 @@ struct hns_roce_query_pf_caps_d {
 	__le32 rsv_uars_rsv_qps;
 };
 #define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
-#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)
 
 #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
 #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
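The header fix narrows the SRQ-count mask from 21 bits to the 20 bits the field actually occupies; bit 20 of the same word already belongs to RQWQE_HOP_NUM (its shift is 20 in the lines above), so the wider mask would fold a hop-num bit into the reported SRQ count. A standalone illustration of the difference (not hns driver code; GENMASK32 here mimics the kernel's GENMASK macro):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

#define NUM_SRQS_MASK	GENMASK32(19, 0)	/* correct: 20-bit field */
#define BAD_SRQS_MASK	GENMASK32(20, 0)	/* wrong: swallows bit 20 */
#define RQWQE_HOP_SHIFT	20

int main(void)
{
	/* Suppose the firmware reports 0x80000 SRQs and a hop number of 1. */
	uint32_t word = 0x80000u | (1u << RQWQE_HOP_SHIFT);

	printf("num_srqs, 20-bit mask: 0x%x\n", word & NUM_SRQS_MASK);	/* 0x80000 */
	printf("num_srqs, 21-bit mask: 0x%x\n", word & BAD_SRQS_MASK);	/* 0x180000 */
	return 0;
}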
@@ -54,10 +54,6 @@
 #define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
 
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 	if (status)
 		goto exit;
 	iwdev->obj_next = iwdev->obj_mem;
-	iwdev->push_mode = push_mode;
 
 	init_waitqueue_head(&iwdev->vchnl_waitq);
 	init_waitqueue_head(&dev->vf_reqs);
@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset, push_offset, pfn;
+	struct i40iw_ucontext *ucontext = to_ucontext(context);
+	u64 dbaddr;
 
-	ucontext = to_ucontext(context);
-	if (ucontext->iwdev->sc_dev.is_pf) {
-		db_addr_offset = I40IW_DB_ADDR_OFFSET;
-		push_offset = I40IW_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
-	} else {
-		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
-		push_offset = I40IW_VF_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
-	}
+	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
 
-	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
 
-	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	} else {
-		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	}
-
-	pfn = vma->vm_pgoff +
-	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
-	       PAGE_SHIFT);
-
-	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
-				 vma->vm_page_prot, NULL);
+	return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+				 pgprot_noncached(vma->vm_page_prot), NULL);
 }
 
 /**
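The rewritten handler no longer derives the mapped physical address from the caller-controlled vma->vm_pgoff; it accepts only a single page at offset zero and computes the doorbell pfn from the PCI BAR itself. A hedged sketch of that shape for a generic PCI driver follows (demo_mmap_doorbell and db_offset are hypothetical names, and this is not the i40iw code, which goes through rdma_user_mmap_io()):

#include <linux/mm.h>
#include <linux/pci.h>

/* Map exactly one doorbell page; db_offset is chosen by the driver. */
static int demo_mmap_doorbell(struct pci_dev *pdev, struct vm_area_struct *vma,
			      unsigned long db_offset)
{
	unsigned long pfn;

	/* Reject any user-supplied offset or size other than one page. */
	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/* The pfn comes from driver-owned state, never from vm_pgoff. */
	pfn = (pci_resource_start(pdev, 0) + db_offset) >> PAGE_SHIFT;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
				  pgprot_noncached(vma->vm_page_prot));
}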
@@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	}
 
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-	if (IS_ERR(mailbox))
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
 		goto err_out_arm;
+	}
 
 	cq_context = mailbox->buf;
 
@@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	}
 
 	spin_lock_irq(&dev->cq_table.lock);
-	if (mthca_array_set(&dev->cq_table.cq,
-			    cq->cqn & (dev->limits.num_cqs - 1),
-			    cq)) {
+	err = mthca_array_set(&dev->cq_table.cq,
+			      cq->cqn & (dev->limits.num_cqs - 1), cq);
+	if (err) {
 		spin_unlock_irq(&dev->cq_table.lock);
 		goto err_out_free_mr;
 	}
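The mthca hunks close a classic gap: err was left at 0 when mthca_alloc_mailbox() returned an ERR_PTR value or when mthca_array_set() failed, so mthca_init_cq() could report success for a half-initialized CQ. The general pattern, sketched with hypothetical demo_* names rather than the mthca code:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_mailbox {
	void *buf;
};

/* Stand-in for an allocator that signals failure through ERR_PTR(). */
static struct demo_mailbox *demo_alloc_mailbox(gfp_t gfp)
{
	struct demo_mailbox *m = kzalloc(sizeof(*m), gfp);

	return m ? m : ERR_PTR(-ENOMEM);
}

static int demo_init(void)
{
	struct demo_mailbox *mailbox;
	int err;

	mailbox = demo_alloc_mailbox(GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);	/* propagate -ENOMEM, not 0 */
		goto err_out;
	}

	/* ... program the hardware context held in mailbox->buf ... */
	kfree(mailbox);
	return 0;

err_out:
	/* unwind whatever was set up before the failure */
	return err;
}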