vhost/vsock: fix use-after-free in network stack callers

If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
a struct vhost_vsock use-after-free is possible.  This occurs because
.release() does not wait for other CPUs to stop using struct
vhost_vsock.

Switch to an RCU-enabled hashtable (indexed by guest CID) so that
.release() can wait for other CPUs by calling synchronize_rcu().  This
also eliminates vhost_vsock_lock acquisition in the data path, which may
improve performance.
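
The resulting data-path pattern, in condensed form (a sketch of the
scheme rather than the literal driver code; use_vsock() is a placeholder
for whatever the caller does with the instance):

    /* .send_pkt()/.cancel_pkt() run in a reader section; the
     * vhost_vsock is only guaranteed to stay alive until the
     * matching rcu_read_unlock().
     */
    rcu_read_lock();
    vsock = vhost_vsock_get(guest_cid); /* lockless RCU hash lookup */
    if (vsock)
            use_vsock(vsock);           /* must not escape the section */
    rcu_read_unlock();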

This is CVE-2018-14625 "kernel: use-after-free Read in vhost_transport_send_pkt".

Cc: stable@vger.kernel.org
Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
commit 834e772c8d (parent 78b1a52e05)
Author:    Stefan Hajnoczi <stefanha@redhat.com>, 2018-11-05 10:35:47 +00:00
Committer: Michael S. Tsirkin <mst@redhat.com>
1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <linux/virtio_vsock.h>
 #include <linux/vhost.h>
+#include <linux/hashtable.h>
 
 #include <net/af_vsock.h>
 #include "vhost.h"
@@ -27,14 +28,14 @@ enum {
 
 /* Used to track all the vhost_vsock instances on the system. */
 static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];
 
-       /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
-       struct list_head list;
+       /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+       struct hlist_node hash;
 
        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
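
The 8 passed to DEFINE_READ_MOSTLY_HASHTABLE() is a bucket-bits count:
the table gets 1 << 8 = 256 hlist buckets placed in read-mostly storage,
and an RCU lookup traverses only the bucket its key hashes to.  A minimal
sketch of that shape, using illustrative example_* names rather than the
driver's own:

    #include <linux/hashtable.h>

    static DEFINE_READ_MOSTLY_HASHTABLE(example_hash, 8); /* 256 buckets */

    struct example {
            u32 key;
            struct hlist_node node;
    };

    /* Caller must hold rcu_read_lock(); only one bucket is walked. */
    static struct example *example_find(u32 key)
    {
            struct example *e;

            hash_for_each_possible_rcu(example_hash, e, node, key) {
                    if (e->key == key)
                            return e;
            }
            return NULL;
    }
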
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
        return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 {
        struct vhost_vsock *vsock;
 
-       list_for_each_entry(vsock, &vhost_vsock_list, list) {
+       hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;
 
                /* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
        return NULL;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
-       struct vhost_vsock *vsock;
-
-       spin_lock_bh(&vhost_vsock_lock);
-       vsock = __vhost_vsock_get(guest_cid);
-       spin_unlock_bh(&vhost_vsock_lock);
-
-       return vsock;
-}
-
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        struct vhost_vsock *vsock;
        int len = pkt->len;
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
+               rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        spin_unlock_bh(&vsock->send_pkt_list_lock);
 
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+       rcu_read_unlock();
        return len;
 }
 
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
+       int ret = -ENODEV;
        LIST_HEAD(freeme);
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
-               return -ENODEV;
+               goto out;
 
        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
                vhost_poll_queue(&tx_vq->poll);
        }
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
-       spin_lock_bh(&vhost_vsock_lock);
-       list_add_tail(&vsock->list, &vhost_vsock_list);
-       spin_unlock_bh(&vhost_vsock_lock);
        return 0;
 
 out:
@@ -585,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
        struct vhost_vsock *vsock = file->private_data;
 
        spin_lock_bh(&vhost_vsock_lock);
-       list_del(&vsock->list);
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
        spin_unlock_bh(&vhost_vsock_lock);
 
+       /* Wait for other CPUs to finish using vsock */
+       synchronize_rcu();
+
        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
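
The ordering in .release() above is what closes the race: the entry is
unpublished under vhost_vsock_lock, synchronize_rcu() then waits out any
CPU still inside an RCU read-side critical section, and only afterwards
may teardown free the object.  Condensed, with the eventual free step
shown as a plain kfree() for illustration (the driver uses its own
teardown path):

    spin_lock_bh(&vhost_vsock_lock);
    if (vsock->guest_cid)
            hash_del_rcu(&vsock->hash); /* new lookups now miss */
    spin_unlock_bh(&vhost_vsock_lock);

    synchronize_rcu();                  /* in-flight readers drain */

    /* ... flush queued work, reset the device ... */
    kfree(vsock);                       /* safe: no CPU can still see it */
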
@@ -628,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
-       other = __vhost_vsock_get(guest_cid);
+       other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
+
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
+
        vsock->guest_cid = guest_cid;
+       hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);
 
        return 0;
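
One subtlety in the vhost_vsock_set_cid() hunk above: an hlist_node can
sit in only one bucket at a time, so an instance that already has a CID
must leave its old bucket before being re-added under the new key, with
both steps inside the same vhost_vsock_lock section so no concurrent
writer can claim the CID in between.  No synchronize_rcu() is needed here
because nothing is freed; a racing reader sees the old mapping, the new
one, or a brief miss.  Condensed from the hunk:

    spin_lock_bh(&vhost_vsock_lock);
    if (vsock->guest_cid)
            hash_del_rcu(&vsock->hash); /* leave the old CID's bucket */
    vsock->guest_cid = guest_cid;
    hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
    spin_unlock_bh(&vhost_vsock_lock);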