RDMA: Use vzalloc() to replace vmalloc()+memset(0)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit 948579cd8c (parent 4162cf6497)
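Background on the conversion: vzalloc() allocates virtually contiguous memory that is already zero-filled, so every vmalloc()+memset(..., 0, ...) pair below collapses into a single call. A minimal before/after sketch of the pattern (the ring structure and function names are hypothetical, for illustration only):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical ring type, not part of this patch. */
struct demo_ring {
	void **slots;
};

/* Old pattern: allocate, then zero by hand. */
static int demo_ring_init_old(struct demo_ring *r, int n)
{
	r->slots = vmalloc(n * sizeof(void *));
	if (!r->slots)
		return -ENOMEM;
	memset(r->slots, 0, n * sizeof(void *));
	return 0;
}

/* New pattern: vzalloc() returns zeroed memory directly. */
static int demo_ring_init_new(struct demo_ring *r, int n)
{
	r->slots = vzalloc(n * sizeof(void *));
	if (!r->slots)
		return -ENOMEM;
	return 0;
}

Either variant is paired with vfree(r->slots) on teardown; the vzalloc() form is shorter and cannot forget the zeroing step.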
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,13 +459,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 			      IB_DEVICE_MEM_WINDOW);
 
 	/* Allocate the qptr_array */
-	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
 	if (!c2dev->qptr_array) {
 		return -ENOMEM;
 	}
 
-	/* Inialize the qptr_array */
-	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+	/* Initialize the qptr_array */
 	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
 	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
 	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,15 +222,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 	queue->small_page = NULL;
 
 	/* allocate queue page pointers */
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
 	}
-	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
 	/* allocate actual queue pages */
 	if (is_small) {
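The ipz_queue_ctor() hunk above also covers the common try-kmalloc-first, fall-back-to-vmalloc idiom: both call sites move to their zeroing variants (kzalloc()/vzalloc()), so the shared memset() after the if/else can go. A hedged sketch of that fallback pattern as a standalone helper pair (helper names are made up; they are not from this patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Zeroed allocation that prefers the slab and falls back to vmalloc
 * space when the slab allocation fails (e.g. for large arrays). */
static void *demo_zalloc_fallback(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/* Matching free: pick vfree() or kfree() by inspecting the address. */
static void demo_zfree_fallback(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

(Later kernels provide kvfree() for exactly this pairing.)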
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,12 +199,11 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 		goto bail;
 	}
 
-	dd = vmalloc(sizeof(*dd));
+	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
-	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;
 
 	spin_lock_irqsave(&ipath_devs_lock, flags);
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd,
 	}
 
 	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
@@ -1538,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd,
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
 		     sizeof(u32), PAGE_SIZE) * num_subports;
-	pd->subport_rcvhdr_base = vmalloc(size);
+	pd->subport_rcvhdr_base = vzalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
 		goto bail_ureg;
 	}
 
-	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
 					num_subports);
 	if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@ static int init_subports(struct ipath_devdata *dd,
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
 	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-	memset(pd->subport_rcvhdr_base, 0, size);
-	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-				pd->port_rcvegrbuf_size *
-				num_subports);
 	goto bail;
 
 bail_rhdr:
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
 			sizeof(struct page *));
 	if (!pages) {
 		ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,9 +461,6 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 		return;
 	}
 
-	memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-	       sizeof(struct page *));
-
 	dd->ipath_pageshadow = pages;
 	dd->ipath_physshadow = addrs;
 }
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -270,23 +270,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
 
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
 	return;
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,15 +352,13 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 	int ret;
 	int i;
 
-	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
 	if (!rx->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
 	}
 
-	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
-
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
 		ret = -ENOMEM;
@@ -1097,13 +1095,12 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;
 
-	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
-	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
@@ -1521,7 +1518,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
-	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
@@ -1530,7 +1527,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
-	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -916,13 +916,12 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		goto out;
 	}
 
-	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
-	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 