IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields
Overflowing either addrlimit or bytes_togo can allow userspace to trigger
a buffer overflow of kernel memory. Check for overflows in all the places
doing math on user controlled buffers.
Fixes: f931551baf ("IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters")
Link: https://lore.kernel.org/r/20211012175519.7298.77738.stgit@awfm-01.cornelisnetworks.com
Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
1ab52ac1e9
commit
d39bf40e55
|
@@ -602,7 +602,7 @@ done:
|
|||
/*
|
||||
* How many pages in this iovec element?
|
||||
*/
|
||||
static int qib_user_sdma_num_pages(const struct iovec *iov)
|
||||
static size_t qib_user_sdma_num_pages(const struct iovec *iov)
|
||||
{
|
||||
const unsigned long addr = (unsigned long) iov->iov_base;
|
||||
const unsigned long len = iov->iov_len;
|
||||
|
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
|
|||
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
|
||||
struct qib_user_sdma_queue *pq,
|
||||
struct qib_user_sdma_pkt *pkt,
|
||||
unsigned long addr, int tlen, int npages)
|
||||
unsigned long addr, int tlen, size_t npages)
|
||||
{
|
||||
struct page *pages[8];
|
||||
int i, j;
|
||||
|
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
|
|||
unsigned long idx;
|
||||
|
||||
for (idx = 0; idx < niov; idx++) {
|
||||
const int npages = qib_user_sdma_num_pages(iov + idx);
|
||||
const size_t npages = qib_user_sdma_num_pages(iov + idx);
|
||||
const unsigned long addr = (unsigned long) iov[idx].iov_base;
|
||||
|
||||
ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
|
||||
|
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
|
|||
unsigned pktnw;
|
||||
unsigned pktnwc;
|
||||
int nfrags = 0;
|
||||
int npages = 0;
|
||||
int bytes_togo = 0;
|
||||
size_t npages = 0;
|
||||
size_t bytes_togo = 0;
|
||||
int tiddma = 0;
|
||||
int cfur;
|
||||
|
||||
|
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
|
|||
|
||||
npages += qib_user_sdma_num_pages(&iov[idx]);
|
||||
|
||||
bytes_togo += slen;
|
||||
if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
|
||||
bytes_togo > type_max(typeof(pkt->bytes_togo))) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
pktnwc += slen >> 2;
|
||||
idx++;
|
||||
nfrags++;
|
||||
|
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
|
|||
}
|
||||
|
||||
if (frag_size) {
|
||||
int tidsmsize, n;
|
||||
size_t pktsize;
|
||||
size_t tidsmsize, n, pktsize, sz, addrlimit;
|
||||
|
||||
n = npages*((2*PAGE_SIZE/frag_size)+1);
|
||||
pktsize = struct_size(pkt, addr, n);
|
||||
|
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
|
|||
else
|
||||
tidsmsize = 0;
|
||||
|
||||
pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
|
||||
if (check_add_overflow(pktsize, tidsmsize, &sz)) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
pkt = kmalloc(sz, GFP_KERNEL);
|
||||
if (!pkt) {
|
||||
ret = -ENOMEM;
|
||||
goto free_pbc;
|
||||
}
|
||||
pkt->largepkt = 1;
|
||||
pkt->frag_size = frag_size;
|
||||
pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
|
||||
if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
|
||||
&addrlimit) ||
|
||||
addrlimit > type_max(typeof(pkt->addrlimit))) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
pkt->addrlimit = addrlimit;
|
||||
|
||||
if (tiddma) {
|
||||
char *tidsm = (char *)pkt + pktsize;
|
||||
|
|
Loading…
Reference in New Issue