staging: unisys: visornic: add error handling for visorchannel_signalinsert/remove

Since signalinsert/remove now return valid error codes, we need to check
them when we call them in visornic. The error codes need to propagate out
to the calling functions.

Signed-off-by: David Kershner <david.kershner@unisys.com>
Reviewed-by: Tim Sell <timothy.sell@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
David Kershner 2017-03-28 09:34:37 -04:00 committed by Greg Kroah-Hartman
parent 2833399edb
commit 0315657116
1 changed file with 97 additions and 40 deletions

View File

@ -407,12 +407,14 @@ alloc_rcv_buf(struct net_device *netdev)
* @skb: skb to give to the IO partition * @skb: skb to give to the IO partition
* *
* Send the skb to the IO Partition. * Send the skb to the IO Partition.
* Returns void * Returns 0 or error
*/ */
static void static int
post_skb(struct uiscmdrsp *cmdrsp, post_skb(struct uiscmdrsp *cmdrsp,
struct visornic_devdata *devdata, struct sk_buff *skb) struct visornic_devdata *devdata, struct sk_buff *skb)
{ {
int err;
cmdrsp->net.buf = skb; cmdrsp->net.buf = skb;
cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data)); cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
cmdrsp->net.rcvpost.frag.pi_off = cmdrsp->net.rcvpost.frag.pi_off =
@ -420,18 +422,23 @@ post_skb(struct uiscmdrsp *cmdrsp,
cmdrsp->net.rcvpost.frag.pi_len = skb->len; cmdrsp->net.rcvpost.frag.pi_len = skb->len;
cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id; cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) { if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
cmdrsp->net.type = NET_RCV_POST; return -EINVAL;
cmdrsp->cmdtype = CMD_NET_TYPE;
if (!visorchannel_signalinsert(devdata->dev->visorchannel, cmdrsp->net.type = NET_RCV_POST;
IOCHAN_TO_IOPART, cmdrsp->cmdtype = CMD_NET_TYPE;
cmdrsp)) { err = visorchannel_signalinsert(devdata->dev->visorchannel,
atomic_inc(&devdata->num_rcvbuf_in_iovm); IOCHAN_TO_IOPART,
devdata->chstat.sent_post++; cmdrsp);
} else { if (err) {
devdata->chstat.sent_post_failed++; devdata->chstat.sent_post_failed++;
} return err;
} }
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
return 0;
} }
/* /*
@ -442,20 +449,25 @@ post_skb(struct uiscmdrsp *cmdrsp,
* @devdata: visornic device we are enabling/disabling * @devdata: visornic device we are enabling/disabling
* *
* Send the enable/disable message to the IO Partition. * Send the enable/disable message to the IO Partition.
* Returns void * Returns 0 or error
*/ */
static void static int
send_enbdis(struct net_device *netdev, int state, send_enbdis(struct net_device *netdev, int state,
struct visornic_devdata *devdata) struct visornic_devdata *devdata)
{ {
int err;
devdata->cmdrsp_rcv->net.enbdis.enable = state; devdata->cmdrsp_rcv->net.enbdis.enable = state;
devdata->cmdrsp_rcv->net.enbdis.context = netdev; devdata->cmdrsp_rcv->net.enbdis.context = netdev;
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS; devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE; devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
if (!visorchannel_signalinsert(devdata->dev->visorchannel, err = visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART, IOCHAN_TO_IOPART,
devdata->cmdrsp_rcv)) devdata->cmdrsp_rcv);
devdata->chstat.sent_enbdis++; if (err)
return err;
devdata->chstat.sent_enbdis++;
return 0;
} }
/* /*
@ -476,6 +488,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
int i; int i;
unsigned long flags; unsigned long flags;
int wait = 0; int wait = 0;
int err;
/* send a msg telling the other end we are stopping incoming pkts */ /* send a msg telling the other end we are stopping incoming pkts */
spin_lock_irqsave(&devdata->priv_lock, flags); spin_lock_irqsave(&devdata->priv_lock, flags);
@ -485,8 +498,11 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
/* send disable and wait for ack -- don't hold lock when sending /* send disable and wait for ack -- don't hold lock when sending
* disable because if the queue is full, insert might sleep. * disable because if the queue is full, insert might sleep.
* If an error occurs, don't wait for the timeout.
*/ */
send_enbdis(netdev, 0, devdata); err = send_enbdis(netdev, 0, devdata);
if (err)
return err;
/* wait for ack to arrive before we try to free rcv buffers /* wait for ack to arrive before we try to free rcv buffers
* NOTE: the other end automatically unposts the rcv buffers when * NOTE: the other end automatically unposts the rcv buffers when
@ -555,7 +571,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
static int static int
init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata) init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
{ {
int i, count; int i, j, count, err;
/* allocate fixed number of receive buffers to post to uisnic /* allocate fixed number of receive buffers to post to uisnic
* post receive buffers after we've allocated a required amount * post receive buffers after we've allocated a required amount
@ -585,8 +601,25 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
* lock - we've not enabled nor started the queue so there shouldn't * lock - we've not enabled nor started the queue so there shouldn't
* be any rcv or xmit activity * be any rcv or xmit activity
*/ */
for (i = 0; i < count; i++) for (i = 0; i < count; i++) {
post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]); err = post_skb(devdata->cmdrsp_rcv, devdata,
devdata->rcvbuf[i]);
if (!err)
continue;
/* Error handling -
* If we posted at least one skb, we should return success,
* but need to free the resources that we have not successfully
* posted.
*/
for (j = i; j < count; j++) {
kfree_skb(devdata->rcvbuf[j]);
devdata->rcvbuf[j] = NULL;
}
if (i == 0)
return err;
break;
}
return 0; return 0;
} }
@ -603,7 +636,7 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
static int static int
visornic_enable_with_timeout(struct net_device *netdev, const int timeout) visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
{ {
int i; int err = 0;
struct visornic_devdata *devdata = netdev_priv(netdev); struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags; unsigned long flags;
int wait = 0; int wait = 0;
@ -613,11 +646,11 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
/* NOTE: the other end automatically unposts the rcv buffers when it /* NOTE: the other end automatically unposts the rcv buffers when it
* gets a disable. * gets a disable.
*/ */
i = init_rcv_bufs(netdev, devdata); err = init_rcv_bufs(netdev, devdata);
if (i < 0) { if (err < 0) {
dev_err(&netdev->dev, dev_err(&netdev->dev,
"%s failed to init rcv bufs (%d)\n", __func__, i); "%s failed to init rcv bufs\n", __func__);
return i; return err;
} }
spin_lock_irqsave(&devdata->priv_lock, flags); spin_lock_irqsave(&devdata->priv_lock, flags);
@ -631,9 +664,12 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
spin_unlock_irqrestore(&devdata->priv_lock, flags); spin_unlock_irqrestore(&devdata->priv_lock, flags);
/* send enable and wait for ack -- don't hold lock when sending enable /* send enable and wait for ack -- don't hold lock when sending enable
* because if the queue is full, insert might sleep. * because if the queue is full, insert might sleep. If an error
* occurs error out.
*/ */
send_enbdis(netdev, 1, devdata); err = send_enbdis(netdev, 1, devdata);
if (err)
return err;
spin_lock_irqsave(&devdata->priv_lock, flags); spin_lock_irqsave(&devdata->priv_lock, flags);
while ((timeout == VISORNIC_INFINITE_RSP_WAIT) || while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
@ -801,6 +837,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
int len, firstfraglen, padlen; int len, firstfraglen, padlen;
struct uiscmdrsp *cmdrsp = NULL; struct uiscmdrsp *cmdrsp = NULL;
unsigned long flags; unsigned long flags;
int err;
devdata = netdev_priv(netdev); devdata = netdev_priv(netdev);
spin_lock_irqsave(&devdata->priv_lock, flags); spin_lock_irqsave(&devdata->priv_lock, flags);
@ -917,8 +954,9 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
if (visorchannel_signalinsert(devdata->dev->visorchannel, err = visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART, cmdrsp)) { IOCHAN_TO_IOPART, cmdrsp);
if (err) {
netif_stop_queue(netdev); netif_stop_queue(netdev);
spin_unlock_irqrestore(&devdata->priv_lock, flags); spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++; devdata->busy_cnt++;
@ -996,6 +1034,7 @@ visornic_set_multi(struct net_device *netdev)
{ {
struct uiscmdrsp *cmdrsp; struct uiscmdrsp *cmdrsp;
struct visornic_devdata *devdata = netdev_priv(netdev); struct visornic_devdata *devdata = netdev_priv(netdev);
int err = 0;
if (devdata->old_flags == netdev->flags) if (devdata->old_flags == netdev->flags)
return; return;
@ -1012,10 +1051,12 @@ visornic_set_multi(struct net_device *netdev)
cmdrsp->net.enbdis.context = netdev; cmdrsp->net.enbdis.context = netdev;
cmdrsp->net.enbdis.enable = cmdrsp->net.enbdis.enable =
netdev->flags & IFF_PROMISC; netdev->flags & IFF_PROMISC;
visorchannel_signalinsert(devdata->dev->visorchannel, err = visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART, IOCHAN_TO_IOPART,
cmdrsp); cmdrsp);
kfree(cmdrsp); kfree(cmdrsp);
if (err)
return;
out_save_flags: out_save_flags:
devdata->old_flags = netdev->flags; devdata->old_flags = netdev->flags;
@ -1108,7 +1149,12 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
status = -ENOMEM; status = -ENOMEM;
break; break;
} }
post_skb(cmdrsp, devdata, devdata->rcvbuf[i]); status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
if (status) {
kfree_skb(devdata->rcvbuf[i]);
devdata->rcvbuf[i] = NULL;
break;
}
numreposted++; numreposted++;
break; break;
} }
@ -1531,17 +1577,18 @@ static const struct file_operations debugfs_info_fops = {
* Send receive buffers to the IO Partition. * Send receive buffers to the IO Partition.
* Returns void * Returns 0 or error
*/ */
static void static int
send_rcv_posts_if_needed(struct visornic_devdata *devdata) send_rcv_posts_if_needed(struct visornic_devdata *devdata)
{ {
int i; int i;
struct net_device *netdev; struct net_device *netdev;
struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv; struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated; int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
int err;
/* don't do this until vnic is marked ready */ /* don't do this until vnic is marked ready */
if (!(devdata->enabled && devdata->enab_dis_acked)) if (!(devdata->enabled && devdata->enab_dis_acked))
return; return 0;
netdev = devdata->netdev; netdev = devdata->netdev;
rcv_bufs_allocated = 0; rcv_bufs_allocated = 0;
@ -1560,11 +1607,17 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
break; break;
} }
rcv_bufs_allocated++; rcv_bufs_allocated++;
post_skb(cmdrsp, devdata, devdata->rcvbuf[i]); err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
if (err) {
kfree_skb(devdata->rcvbuf[i]);
devdata->rcvbuf[i] = NULL;
break;
}
devdata->chstat.extra_rcvbufs_sent++; devdata->chstat.extra_rcvbufs_sent++;
} }
} }
devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated; devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
return 0;
} }
/* /*
@ -1687,8 +1740,12 @@ static int visornic_poll(struct napi_struct *napi, int budget)
struct visornic_devdata, struct visornic_devdata,
napi); napi);
int rx_count = 0; int rx_count = 0;
int err;
err = send_rcv_posts_if_needed(devdata);
if (err)
return err;
send_rcv_posts_if_needed(devdata);
service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget); service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
/* If there aren't any more packets to receive stop the poll */ /* If there aren't any more packets to receive stop the poll */