vhost_scsi: add support for worker ioctls
Have vhost-scsi support the worker ioctls by calling the vhost_worker_ioctl
helper. With a single worker, the single thread becomes a bottleneck when
trying to use 3 or more virtqueues, for example:

fio --filename=/dev/sdb --direct=1 --rw=randrw --bs=4k \
--ioengine=libaio --iodepth=128 --numjobs=3

With these patches and a worker per vq, we can scale to at least 16
vCPUs/vqs (my system's limit) with the same fio command above using
numjobs=16:

fio --filename=/dev/sdb --direct=1 --rw=randrw --bs=4k \
--ioengine=libaio --iodepth=64 --numjobs=16

which gives around 2002K IOPs. Note that for testing I dropped iodepth to 64
above because the vhost/virt layer supports only 1024 total commands per
device. The only tuning done was setting LIO's emulate_pr to 0 to avoid LIO's
PR lock in the main IO path, which becomes an issue at around 12
jobs/virtqueues.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20230626232307.97930-17-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit d74b55e655
parent c1ecd8e950
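To illustrate how a userspace VMM might use these ioctls to get the
worker-per-vq setup described above, here is a rough sketch. It is an
illustration only: the vhost_worker_state and vhost_vring_worker struct names
and their worker_id/index fields are assumed from this patch series' uapi
headers, and vhost_fd/nvqs are hypothetical placeholders for an already-open
vhost-scsi device fd and its virtqueue count.

/*
 * Sketch: create one vhost worker per virtqueue and attach it.
 * Assumes <linux/vhost.h> from a kernel that provides the worker
 * ioctls added by this series; struct and field names are taken
 * from that uapi and should be checked against the real header.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int setup_worker_per_vq(int vhost_fd, unsigned int nvqs)
{
	unsigned int i;

	for (i = 0; i < nvqs; i++) {
		struct vhost_worker_state w = { 0 };
		struct vhost_vring_worker vw = { 0 };

		/* Ask the kernel to spawn a new worker thread; the id of
		 * the new worker is returned in w.worker_id. */
		if (ioctl(vhost_fd, VHOST_NEW_WORKER, &w) < 0)
			return -1;

		/* Bind virtqueue i to the worker created above so each vq
		 * gets its own thread instead of sharing the default one. */
		vw.index = i;
		vw.worker_id = w.worker_id;
		if (ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vw) < 0)
			return -1;
	}
	return 0;
}

VHOST_GET_VRING_WORKER and VHOST_FREE_WORKER would follow the same pattern
for querying which worker a vring currently uses and for releasing a worker
once it is no longer attached to any vring.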
@@ -1927,6 +1927,14 @@ vhost_scsi_ioctl(struct file *f,
 		if (copy_from_user(&features, featurep, sizeof features))
 			return -EFAULT;
 		return vhost_scsi_set_features(vs, features);
+	case VHOST_NEW_WORKER:
+	case VHOST_FREE_WORKER:
+	case VHOST_ATTACH_VRING_WORKER:
+	case VHOST_GET_VRING_WORKER:
+		mutex_lock(&vs->dev.mutex);
+		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
+		mutex_unlock(&vs->dev.mutex);
+		return r;
 	default:
 		mutex_lock(&vs->dev.mutex);
 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);