Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  virtio: fix race in enable_cb
  virtio: Enable netpoll interface for netconsole logging
  virtio: handle > 2 billion page balloon targets
  virtio: Fix sysfs bits to have proper block symlink
  virtio: Use spin_lock_irqsave/restore for virtio-pci
commit 84841384ec
drivers/block/virtio_blk.c
@@ -238,6 +238,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	vblk->disk->first_minor = index_to_minor(index);
 	vblk->disk->private_data = vblk;
 	vblk->disk->fops = &virtblk_fops;
+	vblk->disk->driverfs_dev = &vdev->dev;
 	index++;
 
 	/* If barriers are supported, tell block layer that queue is ordered */
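The sysfs fix is the single driverfs_dev assignment above: with the gendisk's parent device set before add_disk(), the block core can create the /sys/block/<disk>/device symlink back to the virtio device. A minimal sketch of the same pattern in a hypothetical driver's probe path (the helper name is illustrative, not from this commit):

#include <linux/genhd.h>
#include <linux/device.h>

/* Hypothetical helper: register a disk with its parent device filled in
 * so the block core creates the /sys/block/<name>/device symlink. */
static void my_register_disk(struct gendisk *disk, struct device *parent)
{
	disk->driverfs_dev = parent;	/* must be set before add_disk() */
	add_disk(disk);
}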
drivers/net/virtio_net.c
@@ -203,8 +203,11 @@ again:
 	if (received < budget) {
 		netif_rx_complete(vi->dev, napi);
 		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && netif_rx_reschedule(vi->dev, napi))
+		    && napi_schedule_prep(napi)) {
+			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			__netif_rx_schedule(vi->dev, napi);
 			goto again;
+		}
 	}
 
 	return received;
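The race being closed: between the driver's last look at the ring and the call to enable_cb(), the host may consume another buffer, and the callback for it will never fire. enable_cb() therefore returns "false" when work is already pending, and the driver reacts by keeping callbacks off and doing one more poll pass. A stand-alone sketch of that "re-enable, re-check, back off" idiom, with stand-in functions in place of the real virtqueue operations (hypothetical demo, not driver code):

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;			/* buffers the "host" already used */

static void process_pending(void)	/* stands in for the NAPI poll body */
{
	while (pending) {
		printf("handled one buffer\n");
		pending--;
	}
}

static bool enable_cb(void)		/* false if work raced in meanwhile */
{
	return pending == 0;
}

static void disable_cb(void)		/* callbacks stay off while polling */
{
}

int main(void)
{
	for (;;) {
		process_pending();	/* drain everything visible */
		if (enable_cb())	/* queue really empty: wait for a callback */
			break;
		disable_cb();		/* lost the race: poll again */
	}
	return 0;
}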
@@ -278,10 +281,11 @@ again:
 		pr_debug("%s: virtio not prepared to send\n", dev->name);
 		netif_stop_queue(dev);
 
-		/* Activate callback for using skbs: if this fails it
+		/* Activate callback for using skbs: if this returns false it
 		 * means some were used in the meantime. */
 		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			printk("Unlikely: restart svq failed\n");
+			printk("Unlikely: restart svq race\n");
+			vi->svq->vq_ops->disable_cb(vi->svq);
 			netif_start_queue(dev);
 			goto again;
 		}
@@ -294,6 +298,15 @@ again:
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	napi_schedule(&vi->napi);
+}
+#endif
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -336,6 +349,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	dev->stop = virtnet_close;
 	dev->hard_start_xmit = start_xmit;
 	dev->features = NETIF_F_HIGHDMA;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = virtnet_netpoll;
+#endif
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
 	/* Do we support "hardware" checksums? */
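With poll_controller wired up, netconsole can log over a virtio interface: the netpoll core calls dev->poll_controller() to let the driver make progress without relying on its interrupt, and virtnet_netpoll() simply schedules the device's NAPI context so the normal poll routine runs. For reference, netconsole takes its target in the documented src-port@src-ip/dev,dst-port@dst-ip/mac form; the addresses, ports, and interface name below are placeholders, not values from this commit:

modprobe netconsole netconsole=6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55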
drivers/virtio/virtio_balloon.c
@@ -152,7 +152,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
 	wake_up(&vb->config_change);
 }
 
-static inline int towards_target(struct virtio_balloon *vb)
+static inline s64 towards_target(struct virtio_balloon *vb)
 {
 	u32 v;
 	__virtio_config_val(vb->vdev,
@@ -176,7 +176,7 @@ static int balloon(void *_vballoon)
 
 	set_freezable();
 	while (!kthread_should_stop()) {
-		int diff;
+		s64 diff;
 
 		try_to_freeze();
 		wait_event_interruptible(vb->config_change,
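The balloon change is purely a width fix: the target and the current balloon size are u32 page counts, and their difference no longer fits in a plain int once the target passes 2^31 pages, so towards_target() and the local diff become s64. A small stand-alone demonstration of the overflow (the numbers are made up; only the arithmetic matters):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t target  = 3000000000u;	/* a "> 2 billion page" balloon target */
	uint32_t current = 0;		/* pages currently in the balloon */

	/* Narrowing to 32-bit signed wraps negative on typical systems. */
	int32_t bad  = (int32_t)(target - current);
	/* Widening first, as the s64 change does, keeps the real value. */
	int64_t good = (int64_t)target - current;

	printf("as 32-bit int: %" PRId32 "\n", bad);	/* negative: balloon deflates */
	printf("as 64-bit int: %" PRId64 "\n", good);	/* correct page count */
	return 0;
}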
drivers/virtio/virtio_pci.c
@@ -177,6 +177,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	struct virtio_pci_device *vp_dev = opaque;
 	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
 	u8 isr;
 
 	/* reading the ISR has the effect of also clearing it so it's very
@@ -197,12 +198,12 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		drv->config_changed(&vp_dev->vdev);
 	}
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_for_each_entry(info, &vp_dev->virtqueues, node) {
 		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
@@ -214,6 +215,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtio_pci_vq_info *info;
 	struct virtqueue *vq;
+	unsigned long flags;
 	u16 num;
 	int err;
 
@@ -255,9 +257,9 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_add(&info->node, &vp_dev->virtqueues);
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return vq;
 
@@ -274,10 +276,11 @@ static void vp_del_vq(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 	struct virtio_pci_vq_info *info = vq->priv;
+	unsigned long flags;
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_del(&info->node);
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	vring_del_virtqueue(vq);
 
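The virtio-pci hunks all address the same lock: vp_dev->lock is taken both from the interrupt handler (vp_interrupt) and from process context (vp_find_vq/vp_del_vq). If a process-context holder is interrupted on the same CPU and vp_interrupt then spins on the lock it can never get, the CPU deadlocks, hence the switch to spin_lock_irqsave()/spin_unlock_irqrestore(). A schematic kernel-style sketch of the rule, with hypothetical names and assuming only the standard spinlock API:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(shared_lock);	/* taken in both IRQ and process context */

static irqreturn_t my_irq_handler(int irq, void *opaque)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_lock, flags);		/* IRQ-context user */
	/* ... walk the shared list ... */
	spin_unlock_irqrestore(&shared_lock, flags);
	return IRQ_HANDLED;
}

static void my_process_context_update(void)
{
	unsigned long flags;

	/* Disabling local interrupts here keeps my_irq_handler() from
	 * spinning forever on a lock this CPU already holds. */
	spin_lock_irqsave(&shared_lock, flags);
	/* ... modify the shared list ... */
	spin_unlock_irqrestore(&shared_lock, flags);
}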
drivers/virtio/virtio_ring.c
@@ -232,7 +232,6 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 	mb();
 	if (unlikely(more_used(vq))) {
-		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 		END_USE(vq);
 		return false;
 	}
include/linux/virtio.h
@@ -43,8 +43,9 @@ struct virtqueue
 *	vq: the struct virtqueue we're talking about.
 * @enable_cb: restart callbacks after disable_cb.
 *	vq: the struct virtqueue we're talking about.
-*	This returns "false" (and doesn't re-enable) if there are pending
-*	buffers in the queue, to avoid a race.
+*	This re-enables callbacks; it returns "false" if there are pending
+*	buffers in the queue, to detect a possible race between the driver
+*	checking for more work, and enabling callbacks.
 *
 * Locking rules are straightforward: the driver is responsible for
 * locking. No two operations may be invoked simultaneously.