vhost-net: Cleanup vhost_ubuf and vhost_zcopy
- Rename vhost_ubuf to vhost_net_ubuf
- Rename vhost_zcopy_mask to vhost_net_zcopy_mask
- Make funcs static

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent e40ab7484f
commit fe729a57c8
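Before the diff: the zero-copy gating that the renamed vhost_net_zcopy_mask implements is a plain per-virtqueue bitmask, set once at module init and tested per queue index. As an orientation aid, here is a minimal userspace C sketch of that pattern (the enum values mirror the VHOST_NET_VQ_* constants visible in the diff below; everything else is illustrative, not kernel code):

#include <stdio.h>

/* Illustrative stand-ins for the vhost-net virtqueue indices. */
enum { VQ_RX = 0, VQ_TX = 1, VQ_MAX = 2 };

static unsigned zcopy_mask;     /* plays the role of vhost_net_zcopy_mask */

/* Mirrors vhost_net_enable_zcopy(): mark one vq as zero-copy capable. */
static void enable_zcopy(int vq)
{
        zcopy_mask |= 0x1 << vq;
}

int main(void)
{
        int i;

        enable_zcopy(VQ_TX);    /* as vhost_net_init() does for TX */
        for (i = 0; i < VQ_MAX; ++i)
                printf("vq %d zcopy: %s\n", i,
                       (zcopy_mask & (0x1 << i)) ? "on" : "off");
        return 0;
}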
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,7 @@ enum {
         VHOST_NET_VQ_MAX = 2,
 };
 
-struct vhost_ubuf_ref {
+struct vhost_net_ubuf_ref {
         struct kref kref;
         wait_queue_head_t wait;
         struct vhost_virtqueue *vq;
@@ -93,7 +93,7 @@ struct vhost_net_virtqueue {
         struct ubuf_info *ubuf_info;
         /* Reference counting for outstanding ubufs.
          * Protected by vq mutex. Writers must also take device mutex. */
-        struct vhost_ubuf_ref *ubufs;
+        struct vhost_net_ubuf_ref *ubufs;
 };
 
 struct vhost_net {
@@ -110,24 +110,25 @@ struct vhost_net {
         bool tx_flush;
 };
 
-static unsigned vhost_zcopy_mask __read_mostly;
+static unsigned vhost_net_zcopy_mask __read_mostly;
 
-void vhost_enable_zcopy(int vq)
+static void vhost_net_enable_zcopy(int vq)
 {
-        vhost_zcopy_mask |= 0x1 << vq;
+        vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_zerocopy_done_signal(struct kref *kref)
+static void vhost_net_zerocopy_done_signal(struct kref *kref)
 {
-        struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
-                                                    kref);
+        struct vhost_net_ubuf_ref *ubufs;
+
+        ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
         wake_up(&ubufs->wait);
 }
 
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
-                                        bool zcopy)
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
-        struct vhost_ubuf_ref *ubufs;
+        struct vhost_net_ubuf_ref *ubufs;
         /* No zero copy backend? Nothing to count. */
         if (!zcopy)
                 return NULL;
@@ -140,14 +141,14 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
         return ubufs;
 }
 
-void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 }
 
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
         wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
         kfree(ubufs);
 }
@@ -159,7 +160,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
         int i;
 
         for (i = 0; i < n->dev.nvqs; ++i) {
-                zcopy = vhost_zcopy_mask & (0x1 << i);
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                 if (zcopy)
                         kfree(n->vqs[i].ubuf_info);
         }
@@ -171,7 +172,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
         int i;
 
         for (i = 0; i < n->dev.nvqs; ++i) {
-                zcopy = vhost_zcopy_mask & (0x1 << i);
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                 if (!zcopy)
                         continue;
                 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
@@ -183,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 
 err:
         while (i--) {
-                zcopy = vhost_zcopy_mask & (0x1 << i);
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                 if (!zcopy)
                         continue;
                 kfree(n->vqs[i].ubuf_info);
@@ -305,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
 
 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
-        struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+        struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
         struct vhost_virtqueue *vq = ubufs->vq;
         int cnt = atomic_read(&ubufs->kref.refcount);
 
@@ -322,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
         /* set len to mark this desc buffers done DMA */
         vq->heads[ubuf->desc].len = success ?
                 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-        vhost_ubuf_put(ubufs);
+        vhost_net_ubuf_put(ubufs);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -345,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
         int err;
         size_t hdr_size;
         struct socket *sock;
-        struct vhost_ubuf_ref *uninitialized_var(ubufs);
+        struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
         bool zcopy, zcopy_used;
 
         /* TODO: check that we are running from vhost_worker? */
@@ -441,7 +442,7 @@ static void handle_tx(struct vhost_net *net)
                 if (unlikely(err < 0)) {
                         if (zcopy_used) {
                                 if (ubufs)
-                                        vhost_ubuf_put(ubufs);
+                                        vhost_net_ubuf_put(ubufs);
                                 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                         % UIO_MAXIOV;
                         }
@@ -795,7 +796,7 @@ static void vhost_net_flush(struct vhost_net *n)
                 n->tx_flush = true;
                 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                 /* Wait for all lower device DMAs done. */
-                vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+                vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
                 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                 n->tx_flush = false;
                 kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
@@ -896,7 +897,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         struct socket *sock, *oldsock;
         struct vhost_virtqueue *vq;
         struct vhost_net_virtqueue *nvq;
-        struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
+        struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
         int r;
 
         mutex_lock(&n->dev.mutex);
@@ -927,7 +928,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         oldsock = rcu_dereference_protected(vq->private_data,
                                             lockdep_is_held(&vq->mutex));
         if (sock != oldsock) {
-                ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
+                ubufs = vhost_net_ubuf_alloc(vq,
+                                             sock && vhost_sock_zcopy(sock));
                 if (IS_ERR(ubufs)) {
                         r = PTR_ERR(ubufs);
                         goto err_ubufs;
@@ -953,7 +955,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         mutex_unlock(&vq->mutex);
 
         if (oldubufs) {
-                vhost_ubuf_put_and_wait(oldubufs);
+                vhost_net_ubuf_put_and_wait(oldubufs);
                 mutex_lock(&vq->mutex);
                 vhost_zerocopy_signal_used(n, vq);
                 mutex_unlock(&vq->mutex);
@@ -971,7 +973,7 @@ err_used:
         rcu_assign_pointer(vq->private_data, oldsock);
         vhost_net_enable_vq(n, vq);
         if (ubufs)
-                vhost_ubuf_put_and_wait(ubufs);
+                vhost_net_ubuf_put_and_wait(ubufs);
 err_ubufs:
         fput(sock->file);
 err_vq:
@@ -1133,7 +1135,7 @@ static struct miscdevice vhost_net_misc = {
 static int vhost_net_init(void)
 {
         if (experimental_zcopytx)
-                vhost_enable_zcopy(VHOST_NET_VQ_TX);
+                vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
         return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
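The helpers renamed above implement a put-and-wait scheme: a kref counts outstanding zero-copy buffers, and vhost_net_ubuf_put_and_wait() drops its reference and then sleeps on the wait queue until every in-flight ubuf has been released. A minimal userspace analogue of that scheme, assuming pthreads in place of kref/wait_queue_head_t (names such as ubuf_ref are illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct ubuf_ref {
        int refcount;               /* stands in for struct kref */
        pthread_mutex_t lock;
        pthread_cond_t wait;        /* stands in for wait_queue_head_t */
};

static struct ubuf_ref *ubuf_ref_alloc(void)
{
        struct ubuf_ref *u = malloc(sizeof(*u));

        if (!u)
                return NULL;
        u->refcount = 1;            /* like kref_init() */
        pthread_mutex_init(&u->lock, NULL);
        pthread_cond_init(&u->wait, NULL);
        return u;
}

static void ubuf_ref_get(struct ubuf_ref *u)
{
        pthread_mutex_lock(&u->lock);
        u->refcount++;              /* like kref_get() */
        pthread_mutex_unlock(&u->lock);
}

/* Drop one reference; wake waiters when the last one goes away,
 * as vhost_net_zerocopy_done_signal() does via wake_up(). */
static void ubuf_ref_put(struct ubuf_ref *u)
{
        pthread_mutex_lock(&u->lock);
        if (--u->refcount == 0)
                pthread_cond_broadcast(&u->wait);
        pthread_mutex_unlock(&u->lock);
}

/* Drop our reference, then block until all others are gone and free,
 * mirroring vhost_net_ubuf_put_and_wait(). */
static void ubuf_ref_put_and_wait(struct ubuf_ref *u)
{
        pthread_mutex_lock(&u->lock);
        if (--u->refcount == 0)
                pthread_cond_broadcast(&u->wait);
        while (u->refcount != 0)
                pthread_cond_wait(&u->wait, &u->lock);
        pthread_mutex_unlock(&u->lock);
        pthread_cond_destroy(&u->wait);
        pthread_mutex_destroy(&u->lock);
        free(u);                    /* like kfree(ubufs) */
}

int main(void)
{
        struct ubuf_ref *u = ubuf_ref_alloc();

        if (!u)
                return 1;
        ubuf_ref_get(u);            /* TX path pins a ref before a zero-copy send */
        ubuf_ref_put(u);            /* completion callback releases it */
        ubuf_ref_put_and_wait(u);   /* flush path drops the base ref, waits, frees */
        return 0;
}

Making the kernel helpers static, as this patch does, keeps that machinery private to the vhost-net driver, which is the point of the cleanup.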