From c38f57da428b033f2721b611d84b1f40bde674a8 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi
Date: Thu, 6 Dec 2018 19:14:34 +0000
Subject: vhost/vsock: fix reset orphans race with close timeout

If a local process has closed a connected socket and hasn't received a
RST packet yet, then the socket remains in the table until a timeout
expires.

When a vhost_vsock instance is released with the timeout still pending,
the socket is never freed because vhost_vsock has already set the
SOCK_DONE flag.

Check if the close timer is pending and let it close the socket.  This
prevents the race which can leak sockets.

Reported-by: Maximilian Riemensberger
Cc: Graham Whaley
Signed-off-by: Stefan Hajnoczi
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vsock.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 34bc3ab40c6d..731e2ea2aeca 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
         * executing.
         */
 
-       if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
-               sock_set_flag(sk, SOCK_DONE);
-               vsk->peer_shutdown = SHUTDOWN_MASK;
-               sk->sk_state = SS_UNCONNECTED;
-               sk->sk_err = ECONNRESET;
-               sk->sk_error_report(sk);
-       }
+       /* If the peer is still valid, no need to reset connection */
+       if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+               return;
+
+       /* If the close timeout is pending, let it expire.  This avoids races
+        * with the timeout callback.
+        */
+       if (vsk->close_work_scheduled)
+               return;
+
+       sock_set_flag(sk, SOCK_DONE);
+       vsk->peer_shutdown = SHUTDOWN_MASK;
+       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_err = ECONNRESET;
+       sk->sk_error_report(sk);
 }
 
 static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
-- 
cgit v1.2.3-59-g8ed1b
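
The core of the fix, in isolation: once the delayed close work has been scheduled, that work item owns the final cleanup, so any other teardown path must back off instead of marking the socket SOCK_DONE. The following is an illustrative sketch only, not the driver code; my_reset_orphan() is a hypothetical stand-in that reuses the fields and helpers visible in the patch above.

/* Illustrative sketch, not the actual driver code. */
static void my_reset_orphan(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* A pending close timer will free the socket; setting SOCK_DONE
         * here would let that cleanup be skipped and leak the socket.
         */
        if (vsk->close_work_scheduled)
                return;

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ECONNRESET;
        sk->sk_error_report(sk);
}
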
From 2448a299ec416a80f699940a86f4a6d9a4f643b1 Mon Sep 17 00:00:00 2001
From: Halil Pasic
Date: Wed, 26 Sep 2018 18:48:29 +0200
Subject: virtio/s390: avoid race on vcdev->config

Currently we have a race on vcdev->config in virtio_ccw_get_config() and
in virtio_ccw_set_config().

This normally does not cause problems, as these are usually infrequent
operations. However, for some devices writing to/reading from the config
space can be triggered through sysfs attributes. For these, userspace can
force the race by increasing the frequency.

Signed-off-by: Halil Pasic
Cc: stable@vger.kernel.org
Message-Id: <20180925121309.58524-2-pasic@linux.ibm.com>
Signed-off-by: Cornelia Huck
Signed-off-by: Michael S. Tsirkin
---
 drivers/s390/virtio/virtio_ccw.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 97b6f197f007..c94f38ef9a94 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -828,6 +828,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        int ret;
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -846,11 +847,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        if (ret)
                goto out_free;
 
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(vcdev->config, config_area, offset + len);
-       if (buf)
-               memcpy(buf, &vcdev->config[offset], len);
        if (vcdev->config_ready < offset + len)
                vcdev->config_ready = offset + len;
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+       if (buf)
+               memcpy(buf, config_area + offset, len);
 
 out_free:
        kfree(config_area);
@@ -864,6 +867,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -876,9 +880,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        /* Make sure we don't overwrite fields. */
        if (vcdev->config_ready < offset)
                virtio_ccw_get_config(vdev, 0, NULL, offset);
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+       spin_unlock_irqrestore(&vcdev->lock, flags);
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
-- 
cgit v1.2.3-59-g8ed1b
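
The invariant this patch establishes is that the cached config bytes and the config_ready watermark are updated atomically with respect to each other, while the copy to the caller's buffer is taken from the freshly read config_area rather than from the shared cache. A minimal sketch of that update rule follows; the struct and function names are hypothetical and only the locking pattern mirrors the patch.

/* Illustrative sketch, not the driver code: publish a config snapshot
 * and its watermark under one lock so concurrent get/set callers never
 * observe a half-updated cache.
 */
struct my_cfg_cache {
        spinlock_t lock;
        u8 config[256];                 /* 256 bytes assumed for illustration */
        unsigned int config_ready;      /* bytes of config known to be valid */
};

static void my_cfg_cache_update(struct my_cfg_cache *c, const void *src,
                                unsigned int offset, unsigned int len)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        memcpy(c->config, src, offset + len);
        if (c->config_ready < offset + len)
                c->config_ready = offset + len;
        spin_unlock_irqrestore(&c->lock, flags);
}
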
From 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 Mon Sep 17 00:00:00 2001
From: Halil Pasic
Date: Wed, 26 Sep 2018 18:48:30 +0200
Subject: virtio/s390: fix race in ccw_io_helper()

While ccw_io_helper() seems to be intended to be exclusive, in the sense
that it is supposed to facilitate I/O for at most one thread at any given
time, there is actually nothing ensuring that threads won't pile up at
vcdev->wait_q. If they do, all threads get woken up and see a status that
belongs to some request other than their own. This can lead to bugs. For
an example see:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1788432

This race normally does not cause any problems. The operations provided
by struct virtio_config_ops are usually invoked in a well defined
sequence, normally don't fail, and are normally used quite infrequently
too.

Yet, if some of these operations are triggered directly via sysfs
attributes, like in the case described by the referenced bug, userspace
is given an opportunity to force races by increasing the frequency of
the given operations.

Let us fix the problem by ensuring that, for each device, we finish
processing the previous request before starting a new one.

Signed-off-by: Halil Pasic
Reported-by: Colin Ian King
Cc: stable@vger.kernel.org
Message-Id: <20180925121309.58524-3-pasic@linux.ibm.com>
Signed-off-by: Cornelia Huck
Signed-off-by: Michael S. Tsirkin
---
 drivers/s390/virtio/virtio_ccw.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index c94f38ef9a94..c9c57b4a0b71 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -56,6 +56,7 @@ struct virtio_ccw_device {
        unsigned int revision; /* Transport revision */
        wait_queue_head_t wait_q;
        spinlock_t lock;
+       struct mutex io_lock; /* Serializes I/O requests */
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
@@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
 
+       mutex_lock(&vcdev->io_lock);
        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
@@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
-       return ret ? ret : vcdev->err;
+       ret = ret ? ret : vcdev->err;
+       mutex_unlock(&vcdev->io_lock);
+       return ret;
 }
 
 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
@@ -1253,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);
+       mutex_init(&vcdev->io_lock);
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, vcdev);
-- 
cgit v1.2.3-59-g8ed1b
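
The fix is the classic one-request-in-flight pattern: hold a per-device mutex across both the submission retry loop and the wait for completion, so a woken waiter can only ever observe the status of its own request. A stripped-down sketch using the same primitives follows; my_dev, my_start_io() and my_io_pending() are hypothetical placeholders, not functions from the driver.

/* Illustrative sketch, not the driver code: serialize submit + wait so
 * that dev->err always belongs to the caller's own request.
 */
static int my_io_helper(struct my_dev *dev)
{
        int ret;

        mutex_lock(&dev->io_lock);      /* at most one request per device */
        ret = my_start_io(dev);         /* hypothetical submission step */
        if (!ret)
                wait_event(dev->wait_q, !my_io_pending(dev));
        ret = ret ? ret : dev->err;     /* err cannot belong to another thread */
        mutex_unlock(&dev->io_lock);
        return ret;
}
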
From 834e772c8db0c6a275d75315d90aba4ebbb1e249 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi
Date: Mon, 5 Nov 2018 10:35:47 +0000
Subject: vhost/vsock: fix use-after-free in network stack callers

If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
a struct vhost_vsock use-after-free is possible. This occurs because
.release() does not wait for other CPUs to stop using struct
vhost_vsock.

Switch to an RCU-enabled hashtable (indexed by guest CID) so that
.release() can wait for other CPUs by calling synchronize_rcu(). This
also eliminates vhost_vsock_lock acquisition in the data path so it
could have a positive effect on performance.

This is CVE-2018-14625 "kernel: use-after-free Read in
vhost_transport_send_pkt".

Cc: stable@vger.kernel.org
Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
Signed-off-by: Stefan Hajnoczi
Signed-off-by: Michael S. Tsirkin
Acked-by: Jason Wang
---
 drivers/vhost/vsock.c | 57 +++++++++++++++++++++++++++++----------------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 731e2ea2aeca..98ed5be132c6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <linux/virtio_vsock.h>
 #include <linux/vhost.h>
+#include <linux/hashtable.h>
 
 #include <net/af_vsock.h>
 #include "vhost.h"
@@ -27,14 +28,14 @@ enum {
 
 /* Used to track all the vhost_vsock instances on the system. */
 static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];
 
-       /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
-       struct list_head list;
+       /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+       struct hlist_node hash;
 
        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
        return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 {
        struct vhost_vsock *vsock;
 
-       list_for_each_entry(vsock, &vhost_vsock_list, list) {
+       hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;
 
                /* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
        return NULL;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
-       struct vhost_vsock *vsock;
-
-       spin_lock_bh(&vhost_vsock_lock);
-       vsock = __vhost_vsock_get(guest_cid);
-       spin_unlock_bh(&vhost_vsock_lock);
-
-       return vsock;
-}
-
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        struct vhost_vsock *vsock;
        int len = pkt->len;
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
+               rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        spin_unlock_bh(&vsock->send_pkt_list_lock);
 
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+       rcu_read_unlock();
        return len;
 }
 
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
+       int ret = -ENODEV;
        LIST_HEAD(freeme);
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
-               return -ENODEV;
+               goto out;
 
        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
                vhost_poll_queue(&tx_vq->poll);
        }
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
-       spin_lock_bh(&vhost_vsock_lock);
-       list_add_tail(&vsock->list, &vhost_vsock_list);
-       spin_unlock_bh(&vhost_vsock_lock);
        return 0;
 
 out:
@@ -585,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
        struct vhost_vsock *vsock = file->private_data;
 
        spin_lock_bh(&vhost_vsock_lock);
-       list_del(&vsock->list);
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
       spin_unlock_bh(&vhost_vsock_lock);
 
+       /* Wait for other CPUs to finish using vsock */
+       synchronize_rcu();
+
        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -628,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
-       other = __vhost_vsock_get(guest_cid);
+       other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
+
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
+
        vsock->guest_cid = guest_cid;
+       hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);
 
        return 0;
-- 
cgit v1.2.3-59-g8ed1b
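
Taken together, the locking scheme after this patch is: writers take vhost_vsock_lock around hash_add_rcu()/hash_del_rcu(), readers bracket both the lookup and every dereference of the result with rcu_read_lock()/rcu_read_unlock(), and .release() calls synchronize_rcu() after unhashing so that no reader can still hold the pointer when the structure is freed. The sketch below shows only the reader side; my_kick_guest() is a hypothetical helper, not a function from the driver.

/* Illustrative sketch, not the driver code: the RCU read-side critical
 * section must cover the lookup and every use of the returned pointer.
 */
static int my_kick_guest(u32 guest_cid)
{
        struct vhost_vsock *vsock;
        int ret = -ENODEV;

        rcu_read_lock();                        /* keeps vsock alive until unlock */
        vsock = vhost_vsock_get(guest_cid);     /* RCU hash lookup from the patch */
        if (vsock) {
                vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
                ret = 0;
        }
        rcu_read_unlock();                      /* after this, release() may free vsock */
        return ret;
}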