commit 64056a9425
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio updates from Rusty Russell:
 "Nothing exciting: virtio-blk users might see a bit of a boost from
  the doubling of the default queue length though"

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  virtio-blk: base queue-depth on virtqueue ringsize or module param
  Revert a02bbb1ccfe8: MAINTAINERS: add virtio-dev ML for virtio
  virtio: fail adding buffer on broken queues.
  virtio-rng: don't crash if virtqueue is broken.
  virtio_balloon: don't crash if virtqueue is broken.
  virtio_blk: don't crash, report error if virtqueue is broken.
  virtio_net: don't crash if virtqueue is broken.
  virtio_balloon: don't softlockup on huge balloon changes.
  virtio: Use pci_enable_msix_exact() instead of pci_enable_msix()
  MAINTAINERS: virtio-dev is subscribers only
  tools/virtio: add a missing )
  tools/virtio: fix missing kmemleak_ignore symbol
  tools/virtio: update internal copies of headers
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9432,7 +9432,6 @@ F:	include/media/videobuf2-*
 
 VIRTIO CONSOLE DRIVER
 M:	Amit Shah <amit.shah@redhat.com>
-L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c
@@ -9442,7 +9441,6 @@ F:	include/uapi/linux/virtio_console.h
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	Rusty Russell <rusty@rustcorp.com.au>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
-L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/virtio/
@@ -9455,7 +9453,6 @@ F:	include/uapi/linux/virtio_*.h
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
-L:	virtio-dev@lists.oasis-open.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -158,6 +158,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	unsigned long flags;
 	unsigned int num;
 	const bool last = (req->cmd_flags & REQ_END) != 0;
+	int err;
 
 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
@@ -198,11 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	}
 
 	spin_lock_irqsave(&vblk->vq_lock, flags);
-	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
+	if (err) {
 		virtqueue_kick(vblk->vq);
 		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		/* Out of mem doesn't actually happen, since we fall back
+		 * to direct descriptors */
+		if (err == -ENOMEM || err == -ENOSPC)
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_MQ_RQ_QUEUE_ERROR;
 	}
 
 	if (last)
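The split between the two return codes is the point of this hunk: blk-mq requeues a BLK_MQ_RQ_QUEUE_BUSY request and leaves the stopped hardware queue to be restarted by a later completion, while BLK_MQ_RQ_QUEUE_ERROR fails the request outright. A standalone sketch of that classification, not part of the diff (plain userspace C; the enum and function names are made up for illustration):

#include <errno.h>
#include <stdio.h>

enum dispatch_status { DISPATCH_BUSY, DISPATCH_ERROR };

/* Classify a failed add: transient ring pressure vs. a broken device. */
static enum dispatch_status classify_add_req_error(int err)
{
	/* -ENOSPC: ring momentarily full; in-flight completions will free
	 * slots and restart the stopped hw queue.  -ENOMEM: the driver
	 * falls back to direct descriptors, so it is transient too. */
	if (err == -ENOMEM || err == -ENOSPC)
		return DISPATCH_BUSY;	/* requeue and retry later */
	return DISPATCH_ERROR;		/* e.g. -EIO: virtqueue is broken */
}

int main(void)
{
	printf("%d\n", classify_add_req_error(-ENOSPC));	/* 0 = BUSY */
	printf("%d\n", classify_add_req_error(-EIO));		/* 1 = ERROR */
	return 0;
}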
@@ -485,10 +491,11 @@ static struct blk_mq_ops virtio_mq_ops = {
 static struct blk_mq_reg virtio_mq_reg = {
 	.ops		= &virtio_mq_ops,
 	.nr_hw_queues	= 1,
-	.queue_depth	= 64,
+	.queue_depth	= 0, /* Set in virtblk_probe */
 	.numa_node	= NUMA_NO_NODE,
 	.flags		= BLK_MQ_F_SHOULD_MERGE,
 };
+module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
 
 static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
 		struct request *rq, unsigned int nr)
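A note on the new parameter: the 0444 permissions make queue_depth visible at runtime under /sys/module/virtio_blk/parameters/ but settable only at load time (for example, modprobe virtio_blk queue_depth=128); leaving it at 0 keeps the ring-size-based default computed in virtblk_probe below.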
@@ -553,6 +560,13 @@ static int virtblk_probe(struct virtio_device *vdev)
 		goto out_free_vq;
 	}
 
+	/* Default queue sizing is to fill the ring. */
+	if (!virtio_mq_reg.queue_depth) {
+		virtio_mq_reg.queue_depth = vblk->vq->num_free;
+		/* ... but without indirect descs, we use 2 descs per req */
+		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+			virtio_mq_reg.queue_depth /= 2;
+	}
 	virtio_mq_reg.cmd_size =
 		sizeof(struct virtblk_req) +
 		sizeof(struct scatterlist) * sg_elems;
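This sizing rule is where the pull message's "doubling of the default queue length" comes from: QEMU's virtio-blk ring typically has 128 entries, so a device offering VIRTIO_RING_F_INDIRECT_DESC now defaults to a depth of 128 rather than the old hard-coded 64. A standalone sketch of the rule, not part of the diff (the 128-entry ring is an assumed example value):

#include <stdbool.h>
#include <stdio.h>

/* Default depth: fill the ring, halved when each request needs roughly
 * two direct descriptors because indirect descriptors are unavailable. */
static unsigned int default_queue_depth(unsigned int ring_free, bool indirect)
{
	unsigned int depth = ring_free;

	if (!indirect)
		depth /= 2;
	return depth;
}

int main(void)
{
	printf("%u\n", default_queue_depth(128, true));		/* 128 */
	printf("%u\n", default_queue_depth(128, false));	/* 64 */
	return 0;
}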
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
-		BUG();
+	virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 
 	virtqueue_kick(vq);
 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -938,7 +938,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	sgs[out_num] = &stat;
 
 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
-	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
+	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
 	if (unlikely(!virtqueue_kick(vi->cvq)))
 		return status == VIRTIO_NET_OK;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
 
 	/* We should always be able to add one buffer to an empty queue. */
-	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
-		BUG();
+	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
 	virtqueue_kick(vq);
 
 	/* When host has read buffer, this completes via balloon_ack */
@@ -258,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
 	if (!virtqueue_get_buf(vq, &len))
 		return;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
-		BUG();
+	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
 	virtqueue_kick(vq);
 }
 
@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
 		else if (diff < 0)
 			leak_balloon(vb, -diff);
 		update_balloon_size(vb);
+
+		/*
+		 * For large balloon changes, we could spend a lot of time
+		 * and always have work to do.  Be nice if preempt disabled.
+		 */
+		cond_resched();
 	}
 	return 0;
 }
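This is the "don't softlockup on huge balloon changes" fix: inflating or deflating by many gigabytes means this kthread loop iterates over millions of pages, and on a non-preemptible kernel that can monopolize a CPU long enough to trip the softlockup watchdog. cond_resched() inserts an explicit scheduling point on each pass.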
@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb)
 
 		/*
 		 * Prime this virtqueue with one buffer so the hypervisor can
-		 * use it to signal us later.
+		 * use it to signal us later (it can't be broken yet!).
 		 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -333,10 +333,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	for (i = 0; i < nvectors; ++i)
 		vp_dev->msix_entries[i].entry = i;
 
-	/* pci_enable_msix returns positive if we can't get this many. */
-	err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
-	if (err > 0)
-		err = -ENOSPC;
+	err = pci_enable_msix_exact(vp_dev->pci_dev,
+				    vp_dev->msix_entries, nvectors);
 	if (err)
 		goto error;
 	vp_dev->msix_enabled = 1;
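For context: pci_enable_msix() had a three-way return (0 on success, a positive count of available vectors when it could not grant the request, a negative errno otherwise), so all-or-nothing callers like this one had to fold the positive case into -ENOSPC themselves; pci_enable_msix_exact() does that folding in the PCI core. A toy standalone sketch of the contract, not part of the diff (enable_msix() is a made-up stand-in for the real PCI call):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for pci_enable_msix(): 0 on success, a positive
 * count if only that many vectors are available, negative errno on other
 * failures.  Here we pretend the platform has just 4 vectors. */
static int enable_msix(int nvec)
{
	return nvec <= 4 ? 0 : 4;
}

/* All-or-nothing wrapper, mirroring the pci_enable_msix_exact() contract. */
static int enable_msix_exact(int nvec)
{
	int rc = enable_msix(nvec);

	if (rc > 0)		/* partial allocation counts as failure */
		return -ENOSPC;
	return rc;		/* 0 or a negative errno */
}

int main(void)
{
	printf("%d\n", enable_msix_exact(2));	/* 0 */
	printf("%d\n", enable_msix_exact(8));	/* -ENOSPC */
	return 0;
}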
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/hrtimer.h>
+#include <linux/kmemleak.h>
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
@@ -203,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 	BUG_ON(data == NULL);
 
+	if (unlikely(vq->broken)) {
+		END_USE(vq);
+		return -EIO;
+	}
+
 #ifdef DEBUG
 	{
 		ktime_t now = ktime_get();
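This check is the core of the series: once a device is marked broken, every virtqueue_add_* variant refuses work with -EIO instead of touching a dead ring. That is what allows the virtio-rng, virtio_net, virtio_balloon and virtio_blk hunks in this merge to drop their BUG()/BUG_ON() crash-on-failure paths and degrade gracefully.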
@@ -309,7 +315,7 @@ add_head:
  * Caller must ensure we don't call this with other virtqueue operations
  * at the same time (except where noted).
  *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
  */
 int virtqueue_add_sgs(struct virtqueue *_vq,
 		      struct scatterlist *sgs[],
@@ -347,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
  * Caller must ensure we don't call this with other virtqueue operations
  * at the same time (except where noted).
  *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
  */
 int virtqueue_add_outbuf(struct virtqueue *vq,
 			 struct scatterlist sg[], unsigned int num,
@@ -369,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
  * Caller must ensure we don't call this with other virtqueue operations
  * at the same time (except where noted).
  *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
  */
 int virtqueue_add_inbuf(struct virtqueue *vq,
 			struct scatterlist sg[], unsigned int num,
diff --git a/tools/virtio/linux/kmemleak.h b/tools/virtio/linux/kmemleak.h
new file mode 100644
--- /dev/null
+++ b/tools/virtio/linux/kmemleak.h
@@ -0,0 +1,3 @@
+static inline void kmemleak_ignore(const void *ptr)
+{
+}
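The stub exists because tools/virtio builds the real drivers/virtio/virtio_ring.c in userspace, and that file now pulls in linux/kmemleak.h (see the include hunk above) and calls kmemleak_ignore() on its indirect-descriptor allocations; outside the kernel the call just needs to compile away to nothing.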
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -63,7 +63,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
 			void *data,
 			gfp_t gfp);
 
-void virtqueue_kick(struct virtqueue *vq);
+bool virtqueue_kick(struct virtqueue *vq);
 
 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
 
@@ -79,7 +79,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 				      struct virtio_device *vdev,
 				      bool weak_barriers,
 				      void *pages,
-				      void (*notify)(struct virtqueue *vq),
+				      bool (*notify)(struct virtqueue *vq),
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name);
 void vring_del_virtqueue(struct virtqueue *vq);
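Both prototype updates mirror earlier kernel API changes: virtqueue_kick() and the ring's notify hook now return bool so a failed host notification can mark the queue broken, and the internal copies in tools/virtio must track those signatures for the userspace tests to keep building. The run_test fix below is part of the same cleanup.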
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -172,7 +172,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 					 GFP_ATOMIC);
 			if (likely(r == 0)) {
 				++started;
-				if (unlikely(!virtqueue_kick(vq->vq))
+				if (unlikely(!virtqueue_kick(vq->vq)))
 					r = -1;
 			}
 		} else