author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/vhost
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/Kconfig     92
-rw-r--r--  drivers/vhost/Makefile    19
-rw-r--r--  drivers/vhost/iotlb.c    217
-rw-r--r--  drivers/vhost/net.c     1812
-rw-r--r--  drivers/vhost/scsi.c    2527
-rw-r--r--  drivers/vhost/test.c     376
-rw-r--r--  drivers/vhost/test.h       9
-rw-r--r--  drivers/vhost/vdpa.c    1490
-rw-r--r--  drivers/vhost/vhost.c   2662
-rw-r--r--  drivers/vhost/vhost.h    328
-rw-r--r--  drivers/vhost/vringh.c  1509
-rw-r--r--  drivers/vhost/vsock.c    948
12 files changed, 11989 insertions(+), 0 deletions(-)
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
new file mode 100644
index 000000000..587fbae06
--- /dev/null
+++ b/drivers/vhost/Kconfig
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VHOST_IOTLB
+ tristate
+ help
+ Generic IOTLB implementation for vhost and vringh.
+ This option is selected by any driver which needs to support
+ an IOMMU in software.
+
+config VHOST_RING
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
+
+config VHOST
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the core of vhost.
+
+menuconfig VHOST_MENU
+ bool "VHOST drivers"
+ default y
+
+if VHOST_MENU
+
+config VHOST_NET
+ tristate "Host kernel accelerator for virtio net"
+ depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
+ select VHOST
+ help
+ This kernel module can be loaded in the host kernel to accelerate
+ guest networking with virtio_net. Not to be confused with the virtio_net
+ module itself, which needs to be loaded in the guest kernel.
+
+ To compile this driver as a module, choose M here: the module will
+ be called vhost_net.
+
+config VHOST_SCSI
+ tristate "VHOST_SCSI TCM fabric driver"
+ depends on TARGET_CORE && EVENTFD
+ select VHOST
+ default n
+ help
+ Say M here to enable the vhost_scsi TCM fabric module
+ for use with virtio-scsi guests.
+
+config VHOST_VSOCK
+ tristate "vhost virtio-vsock driver"
+ depends on VSOCKETS && EVENTFD
+ select VHOST
+ select VIRTIO_VSOCKETS_COMMON
+ default n
+ help
+ This kernel module can be loaded in the host kernel to provide AF_VSOCK
+ sockets for communicating with guests. The guests must have the
+ virtio_transport.ko driver loaded to use the virtio-vsock device.
+
+ To compile this driver as a module, choose M here: the module will be called
+ vhost_vsock.
+
+config VHOST_VDPA
+ tristate "Vhost driver for vDPA-based backend"
+ depends on EVENTFD
+ select VHOST
+ select IRQ_BYPASS_MANAGER
+ depends on VDPA
+ help
+ This kernel module can be loaded in the host kernel to accelerate
+ guest virtio devices with vDPA-based backends.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vhost_vdpa.
+
+config VHOST_CROSS_ENDIAN_LEGACY
+ bool "Cross-endian support for vhost"
+ default n
+ help
+ This option allows vhost to support guests with a different byte
+ ordering from the host while using legacy virtio.
+
+ Userspace programs can control the feature using the
+ VHOST_SET_VRING_ENDIAN and VHOST_GET_VRING_ENDIAN ioctls.
+
+ This is only useful on a few platforms (ppc64 and arm64). Since it
+ adds some overhead, it is disabled by default.
+
+ If unsure, say "N".
+
+endif
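
The VHOST_NET help text above describes a module that userspace (typically QEMU) drives through ioctls on /dev/vhost-net; the handlers for those ioctls live in net.c further down in this patch. As a rough orientation, here is a minimal, hypothetical userspace sketch of that handshake. It assumes an already-open TAP file descriptor (tap_fd) and deliberately omits the memory-table and vring setup (VHOST_SET_MEM_TABLE, VHOST_SET_VRING_NUM/ADDR/KICK/CALL) that a real frontend must perform before VHOST_NET_SET_BACKEND can pass the ring-access checks.

    /* Hypothetical sketch only: error paths trimmed, vring and memory-table
     * setup omitted; tap_fd is assumed to be an open TAP device. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    static int vhost_net_attach(int tap_fd)
    {
            int vhost_fd = open("/dev/vhost-net", O_RDWR);
            uint64_t features;
            struct vhost_vring_file backend;

            if (vhost_fd < 0)
                    return -1;

            /* Bind the calling process as owner of this vhost device. */
            if (ioctl(vhost_fd, VHOST_SET_OWNER) < 0)
                    return -1;

            /* Feature negotiation: read what the kernel offers, ack it back
             * (a real frontend would mask this against its own feature set). */
            if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0 ||
                ioctl(vhost_fd, VHOST_SET_FEATURES, &features) < 0)
                    return -1;

            /* Attach the TAP fd as backend of the RX queue (index 0, matching
             * VHOST_NET_VQ_RX in net.c); repeat with index 1 for TX. */
            backend.index = 0;
            backend.fd = tap_fd;
            if (ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend) < 0)
                    return -1;

            return vhost_fd;
    }

A real frontend also passes eventfds via VHOST_SET_VRING_KICK and VHOST_SET_VRING_CALL so that the handle_tx/handle_rx work items in net.c are woken when the guest kicks a queue.
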
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
new file mode 100644
index 000000000..f3e1897cc
--- /dev/null
+++ b/drivers/vhost/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VHOST_NET) += vhost_net.o
+vhost_net-y := net.o
+
+obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
+vhost_scsi-y := scsi.o
+
+obj-$(CONFIG_VHOST_VSOCK) += vhost_vsock.o
+vhost_vsock-y := vsock.o
+
+obj-$(CONFIG_VHOST_RING) += vringh.o
+
+obj-$(CONFIG_VHOST_VDPA) += vhost_vdpa.o
+vhost_vdpa-y := vdpa.o
+
+obj-$(CONFIG_VHOST) += vhost.o
+
+obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o
+vhost_iotlb-y := iotlb.o
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
new file mode 100644
index 000000000..ea61330a3
--- /dev/null
+++ b/drivers/vhost/iotlb.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Red Hat, Inc.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * IOTLB implementation for vhost.
+ */
+#include <linux/slab.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/module.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_DESC "VHOST IOTLB"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_LICENSE "GPL v2"
+
+#define START(map) ((map)->start)
+#define LAST(map) ((map)->last)
+
+INTERVAL_TREE_DEFINE(struct vhost_iotlb_map,
+ rb, __u64, __subtree_last,
+ START, LAST, static inline, vhost_iotlb_itree);
+
+/**
+ * vhost_iotlb_map_free - remove a map node and free it
+ * @iotlb: the IOTLB
+ * @map: the map to be removed and freed
+ */
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map)
+{
+ vhost_iotlb_itree_remove(map, &iotlb->root);
+ list_del(&map->link);
+ kfree(map);
+ iotlb->nmaps--;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
+
+/**
+ * vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ * @addr: the address that is mapped to @start
+ * @perm: access permission of this range
+ * @opaque: the opaque pointer for the new mapping
+ *
+ * Returns an error if @last is smaller than @start or if memory
+ * allocation fails
+ */
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ void *opaque)
+{
+ struct vhost_iotlb_map *map;
+
+ if (last < start)
+ return -EFAULT;
+
+ /* If the range being mapped is [0, ULONG_MAX], split it into two entries
+ * otherwise its size would overflow u64.
+ */
+ if (start == 0 && last == ULONG_MAX) {
+ u64 mid = last / 2;
+ int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+ perm, opaque);
+
+ if (err)
+ return err;
+
+ addr += mid + 1;
+ start = mid + 1;
+ }
+
+ if (iotlb->limit &&
+ iotlb->nmaps == iotlb->limit &&
+ iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
+ map = list_first_entry(&iotlb->list, typeof(*map), link);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+
+ map = kmalloc(sizeof(*map), GFP_ATOMIC);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = start;
+ map->size = last - start + 1;
+ map->last = last;
+ map->addr = addr;
+ map->perm = perm;
+ map->opaque = opaque;
+
+ iotlb->nmaps++;
+ vhost_iotlb_itree_insert(map, &iotlb->root);
+
+ INIT_LIST_HEAD(&map->link);
+ list_add_tail(&map->link, &iotlb->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
+
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ return vhost_iotlb_add_range_ctx(iotlb, start, last,
+ addr, perm, NULL);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
+
+/**
+ * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ */
+void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
+ start, last)))
+ vhost_iotlb_map_free(iotlb, map);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
+
+/**
+ * vhost_iotlb_init - initialize a vhost IOTLB
+ * @iotlb: the IOTLB that needs to be initialized
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ */
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags)
+{
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_init);
+
+/**
+ * vhost_iotlb_alloc - allocate a new vhost IOTLB
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ *
+ * Returns NULL if memory allocation fails
+ */
+struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
+{
+ struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL);
+
+ if (!iotlb)
+ return NULL;
+
+ vhost_iotlb_init(iotlb, limit, flags);
+
+ return iotlb;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_alloc);
+
+/**
+ * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries)
+ * @iotlb: the IOTLB to be reset
+ */
+void vhost_iotlb_reset(struct vhost_iotlb *iotlb)
+{
+ vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_reset);
+
+/**
+ * vhost_iotlb_free - reset and free vhost IOTLB
+ * @iotlb: the IOTLB to be freed
+ */
+void vhost_iotlb_free(struct vhost_iotlb *iotlb)
+{
+ if (iotlb) {
+ vhost_iotlb_reset(iotlb);
+ kfree(iotlb);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_free);
+
+/**
+ * vhost_iotlb_itree_first - return the first overlapped range
+ * @iotlb: the IOTLB
+ * @start: start of IOVA range
+ * @last: last byte in IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_first(&iotlb->root, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
+
+/**
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the starting map node
+ * @start: start of IOVA range
+ * @last: last byte in IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_next(map, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next);
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_LICENSE(MOD_LICENSE);
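
To make the API added by iotlb.c easier to follow, here is a small, hypothetical kernel-side sketch of how a consumer (the vringh and vdpa code elsewhere in this patch are the real users) might exercise it: allocate an IOTLB with a retire limit, install a mapping, walk overlapping ranges, and tear everything down. The permission constant VHOST_MAP_RW is assumed to come from <linux/vhost_iotlb.h>; it is not defined in iotlb.c itself.

    /* Hypothetical usage sketch of the vhost_iotlb API above. */
    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/vhost_iotlb.h>

    static int iotlb_demo(void)
    {
            struct vhost_iotlb *iotlb;
            struct vhost_iotlb_map *map;
            int err;

            /* At most 2048 entries; with VHOST_IOTLB_FLAG_RETIRE the oldest
             * entry is evicted when the limit is hit (see add_range_ctx). */
            iotlb = vhost_iotlb_alloc(2048, VHOST_IOTLB_FLAG_RETIRE);
            if (!iotlb)
                    return -ENOMEM;

            /* Map IOVA [0x1000, 0x1fff] to host address 0xdead0000, read/write.
             * VHOST_MAP_RW is assumed from <linux/vhost_iotlb.h>. */
            err = vhost_iotlb_add_range(iotlb, 0x1000, 0x1fff, 0xdead0000,
                                        VHOST_MAP_RW);
            if (err)
                    goto out;

            /* Walk every mapping that overlaps [0x0, 0xffff]. */
            for (map = vhost_iotlb_itree_first(iotlb, 0x0, 0xffff); map;
                 map = vhost_iotlb_itree_next(map, 0x0, 0xffff))
                    pr_info("iova [%llx, %llx] -> %llx\n",
                            map->start, map->last, map->addr);

            /* Drop the mappings in that range, then the IOTLB itself. */
            vhost_iotlb_del_range(iotlb, 0x1000, 0x1fff);
    out:
            vhost_iotlb_free(iotlb);
            return err;
    }

A module using this API would also select VHOST_IOTLB in Kconfig, as VHOST and VHOST_RING do above.
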
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
new file mode 100644
index 000000000..4418192ab
--- /dev/null
+++ b/drivers/vhost/net.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * virtio-net server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
+
+#include <linux/net.h>
+#include <linux/if_packet.h>
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+#include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
+#include <linux/if_vlan.h>
+#include <linux/skb_array.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/xdp.h>
+
+#include "vhost.h"
+
+static int experimental_zcopytx = 0;
+module_param(experimental_zcopytx, int, 0444);
+MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
+ " 1 -Enable; 0 - Disable");
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_NET_WEIGHT 0x80000
+
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
+
+/* MAX number of TX used buffers for outstanding zerocopy */
+#define VHOST_MAX_PEND 128
+#define VHOST_GOODCOPY_LEN 256
+
+/*
+ * For transmit, used buffer len is unused; we override it to track buffer
+ * status internally; used for zerocopy tx only.
+ */
+/* Lower device DMA failed */
+#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
+/* Lower device DMA done */
+#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
+/* Lower device DMA in progress */
+#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
+/* Buffer unused */
+#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
+
+#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
+
+enum {
+ VHOST_NET_FEATURES = VHOST_FEATURES |
+ (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
+enum {
+ VHOST_NET_VQ_RX = 0,
+ VHOST_NET_VQ_TX = 1,
+ VHOST_NET_VQ_MAX = 2,
+};
+
+struct vhost_net_ubuf_ref {
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ struct vhost_virtqueue *vq;
+};
+
+#define VHOST_NET_BATCH 64
+struct vhost_net_buf {
+ void **queue;
+ int tail;
+ int head;
+};
+
+struct vhost_net_virtqueue {
+ struct vhost_virtqueue vq;
+ size_t vhost_hlen;
+ size_t sock_hlen;
+ /* vhost zerocopy support fields below: */
+ /* last used idx for outstanding DMA zerocopy buffers */
+ int upend_idx;
+ /* For TX, first used idx for DMA done zerocopy buffers
+ * For RX, number of batched heads
+ */
+ int done_idx;
+ /* Number of XDP frames batched */
+ int batched_xdp;
+ /* an array of userspace buffers info */
+ struct ubuf_info_msgzc *ubuf_info;
+ /* Reference counting for outstanding ubufs.
+ * Protected by vq mutex. Writers must also take device mutex. */
+ struct vhost_net_ubuf_ref *ubufs;
+ struct ptr_ring *rx_ring;
+ struct vhost_net_buf rxq;
+ /* Batched XDP buffs */
+ struct xdp_buff *xdp;
+};
+
+struct vhost_net {
+ struct vhost_dev dev;
+ struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
+ struct vhost_poll poll[VHOST_NET_VQ_MAX];
+ /* Number of TX recently submitted.
+ * Protected by tx vq lock. */
+ unsigned tx_packets;
+ /* Number of times zerocopy TX recently failed.
+ * Protected by tx vq lock. */
+ unsigned tx_zcopy_err;
+ /* Flush in progress. Protected by tx vq lock. */
+ bool tx_flush;
+ /* Private page frag */
+ struct page_frag page_frag;
+ /* Refcount bias of page frag */
+ int refcnt_bias;
+};
+
+static unsigned vhost_net_zcopy_mask __read_mostly;
+
+static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
+{
+ if (rxq->tail != rxq->head)
+ return rxq->queue[rxq->head];
+ else
+ return NULL;
+}
+
+static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
+{
+ return rxq->tail - rxq->head;
+}
+
+static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
+{
+ return rxq->tail == rxq->head;
+}
+
+static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
+{
+ void *ret = vhost_net_buf_get_ptr(rxq);
+ ++rxq->head;
+ return ret;
+}
+
+static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ rxq->head = 0;
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
+ VHOST_NET_BATCH);
+ return rxq->tail;
+}
+
+static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ tun_ptr_free);
+ rxq->head = rxq->tail = 0;
+ }
+}
+
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
+
+ return xdpf->len;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
+static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (!vhost_net_buf_is_empty(rxq))
+ goto out;
+
+ if (!vhost_net_buf_produce(nvq))
+ return 0;
+
+out:
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
+}
+
+static void vhost_net_buf_init(struct vhost_net_buf *rxq)
+{
+ rxq->head = rxq->tail = 0;
+}
+
+static void vhost_net_enable_zcopy(int vq)
+{
+ vhost_net_zcopy_mask |= 0x1 << vq;
+}
+
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
+{
+ struct vhost_net_ubuf_ref *ubufs;
+ /* No zero copy backend? Nothing to count. */
+ if (!zcopy)
+ return NULL;
+ ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
+ if (!ubufs)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&ubufs->refcount, 1);
+ init_waitqueue_head(&ubufs->wait);
+ ubufs->vq = vq;
+ return ubufs;
+}
+
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+{
+ int r = atomic_sub_return(1, &ubufs->refcount);
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ return r;
+}
+
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
+{
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+}
+
+static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+{
+ vhost_net_ubuf_put_and_wait(ubufs);
+ kfree(ubufs);
+}
+
+static void vhost_net_clear_ubuf_info(struct vhost_net *n)
+{
+ int i;
+
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ kfree(n->vqs[i].ubuf_info);
+ n->vqs[i].ubuf_info = NULL;
+ }
+}
+
+static int vhost_net_set_ubuf_info(struct vhost_net *n)
+{
+ bool zcopy;
+ int i;
+
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ zcopy = vhost_net_zcopy_mask & (0x1 << i);
+ if (!zcopy)
+ continue;
+ n->vqs[i].ubuf_info =
+ kmalloc_array(UIO_MAXIOV,
+ sizeof(*n->vqs[i].ubuf_info),
+ GFP_KERNEL);
+ if (!n->vqs[i].ubuf_info)
+ goto err;
+ }
+ return 0;
+
+err:
+ vhost_net_clear_ubuf_info(n);
+ return -ENOMEM;
+}
+
+static void vhost_net_vq_reset(struct vhost_net *n)
+{
+ int i;
+
+ vhost_net_clear_ubuf_info(n);
+
+ for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
+ n->vqs[i].done_idx = 0;
+ n->vqs[i].upend_idx = 0;
+ n->vqs[i].ubufs = NULL;
+ n->vqs[i].vhost_hlen = 0;
+ n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
+ }
+
+}
+
+static void vhost_net_tx_packet(struct vhost_net *net)
+{
+ ++net->tx_packets;
+ if (net->tx_packets < 1024)
+ return;
+ net->tx_packets = 0;
+ net->tx_zcopy_err = 0;
+}
+
+static void vhost_net_tx_err(struct vhost_net *net)
+{
+ ++net->tx_zcopy_err;
+}
+
+static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
+{
+ /* TX flush waits for outstanding DMAs to be done.
+ * Don't start new DMAs.
+ */
+ return !net->tx_flush &&
+ net->tx_packets / 64 >= net->tx_zcopy_err;
+}
+
+static bool vhost_sock_zcopy(struct socket *sock)
+{
+ return unlikely(experimental_zcopytx) &&
+ sock_flag(sock->sk, SOCK_ZEROCOPY);
+}
+
+static bool vhost_sock_xdp(struct socket *sock)
+{
+ return sock_flag(sock->sk, SOCK_XDP);
+}
+
+/* The lower device driver may complete DMA out of order for some reason.
+ * upend_idx tracks the end of the used idx, done_idx tracks the head of the
+ * used idx. Once the lower device has completed DMA contiguously, we signal
+ * the used idx to the KVM guest.
+ */
+static void vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ int i, add;
+ int j = 0;
+
+ for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+ if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
+ vhost_net_tx_err(net);
+ if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
+ vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+ ++j;
+ } else
+ break;
+ }
+ while (j) {
+ add = min(UIO_MAXIOV - nvq->done_idx, j);
+ vhost_add_used_and_signal_n(vq->dev, vq,
+ &vq->heads[nvq->done_idx], add);
+ nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
+ j -= add;
+ }
+}
+
+static void vhost_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf_base, bool success)
+{
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
+ struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
+ struct vhost_virtqueue *vq = ubufs->vq;
+ int cnt;
+
+ rcu_read_lock_bh();
+
+ /* set len to mark this descriptor's buffers as having completed DMA */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ cnt = vhost_net_ubuf_put(ubufs);
+
+ /*
+ * Trigger polling thread if guest stopped submitting new buffers:
+ * in this case, the refcount after decrement will eventually reach 1.
+ * We also trigger polling periodically after each 16 packets
+ * (the value 16 here is more or less arbitrary, it's tuned to trigger
+ * less than 10% of times).
+ */
+ if (cnt <= 1 || !(cnt % 16))
+ vhost_poll_queue(&vq->poll);
+
+ rcu_read_unlock_bh();
+}
+
+static inline unsigned long busy_clock(void)
+{
+ return local_clock() >> 10;
+}
+
+static bool vhost_can_busy_poll(unsigned long endtime)
+{
+ return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+ !signal_pending(current));
+}
+
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ if (!vhost_vq_get_backend(vq))
+ return;
+ vhost_poll_stop(poll);
+}
+
+static int vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ struct socket *sock;
+
+ sock = vhost_vq_get_backend(vq);
+ if (!sock)
+ return 0;
+
+ return vhost_poll_start(poll, sock->file);
+}
+
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_dev *dev = vq->dev;
+
+ if (!nvq->done_idx)
+ return;
+
+ vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+ nvq->done_idx = 0;
+}
+
+static void vhost_tx_batch(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct socket *sock,
+ struct msghdr *msghdr)
+{
+ struct tun_msg_ctl ctl = {
+ .type = TUN_MSG_PTR,
+ .num = nvq->batched_xdp,
+ .ptr = nvq->xdp,
+ };
+ int i, err;
+
+ if (nvq->batched_xdp == 0)
+ goto signal_used;
+
+ msghdr->msg_control = &ctl;
+ msghdr->msg_controllen = sizeof(ctl);
+ err = sock->ops->sendmsg(sock, msghdr, 0);
+ if (unlikely(err < 0)) {
+ vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+ /* free pages owned by XDP; since this is an unlikely error path,
+ * keep it simple and avoid more complex bulk update for the
+ * used pages
+ */
+ for (i = 0; i < nvq->batched_xdp; ++i)
+ put_page(virt_to_head_page(nvq->xdp[i].data));
+ nvq->batched_xdp = 0;
+ nvq->done_idx = 0;
+ return;
+ }
+
+signal_used:
+ vhost_net_signal_used(nvq);
+ nvq->batched_xdp = 0;
+}
+
+static int sock_has_rx_data(struct socket *sock)
+{
+ if (unlikely(!sock))
+ return 0;
+
+ if (sock->ops->peek_len)
+ return sock->ops->peek_len(sock);
+
+ return skb_queue_empty(&sock->sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ if (!vhost_vq_avail_empty(&net->dev, vq)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ vhost_poll_queue(&vq->poll);
+ }
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+ struct vhost_virtqueue *rvq,
+ struct vhost_virtqueue *tvq,
+ bool *busyloop_intr,
+ bool poll_rx)
+{
+ unsigned long busyloop_timeout;
+ unsigned long endtime;
+ struct socket *sock;
+ struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
+
+ /* Try to hold the vq mutex of the paired virtqueue. We can't
+ * use mutex_lock() here since we cannot guarantee a
+ * consistent lock ordering.
+ */
+ if (!mutex_trylock(&vq->mutex))
+ return;
+
+ vhost_disable_notify(&net->dev, vq);
+ sock = vhost_vq_get_backend(rvq);
+
+ busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
+ tvq->busyloop_timeout;
+
+ preempt_disable();
+ endtime = busy_clock() + busyloop_timeout;
+
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(&net->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+
+ if ((sock_has_rx_data(sock) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) ||
+ !vhost_vq_avail_empty(&net->dev, tvq))
+ break;
+
+ cpu_relax();
+ }
+
+ preempt_enable();
+
+ if (poll_rx || sock_has_rx_data(sock))
+ vhost_net_busy_poll_try_queue(net, vq);
+ else if (!poll_rx) /* On tx here, sock has no rx data. */
+ vhost_enable_notify(&net->dev, rvq);
+
+ mutex_unlock(&vq->mutex);
+}
+
+static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+ struct vhost_net_virtqueue *tnvq,
+ unsigned int *out_num, unsigned int *in_num,
+ struct msghdr *msghdr, bool *busyloop_intr)
+{
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+
+ int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL);
+
+ if (r == tvq->num && tvq->busyloop_timeout) {
+ /* Flush batched packets first */
+ if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
+ vhost_tx_batch(net, tnvq,
+ vhost_vq_get_backend(tvq),
+ msghdr);
+
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+ r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL);
+ }
+
+ return r;
+}
+
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+
+ return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
+ min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
+}
+
+static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
+ size_t hdr_size, int out)
+{
+ /* Skip header. TODO: support TSO. */
+ size_t len = iov_length(vq->iov, out);
+
+ iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
+ iov_iter_advance(iter, hdr_size);
+
+ return iov_iter_count(iter);
+}
+
+static int get_tx_bufs(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct msghdr *msg,
+ unsigned int *out, unsigned int *in,
+ size_t *len, bool *busyloop_intr)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ int ret;
+
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
+
+ if (ret < 0 || ret == vq->num)
+ return ret;
+
+ if (*in) {
+ vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
+ *out, *in);
+ return -EFAULT;
+ }
+
+ /* Sanity check */
+ *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
+ if (*len == 0) {
+ vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
+ *len, nvq->vhost_hlen);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
+{
+ return total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(vq->dev, vq);
+}
+
+static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
+ struct page_frag *pfrag, gfp_t gfp)
+{
+ if (pfrag->page) {
+ if (pfrag->offset + sz <= pfrag->size)
+ return true;
+ __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
+ }
+
+ pfrag->offset = 0;
+ net->refcnt_bias = 0;
+ if (SKB_FRAG_PAGE_ORDER) {
+ /* Avoid direct reclaim but allow kswapd to wake */
+ pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
+ __GFP_COMP | __GFP_NOWARN |
+ __GFP_NORETRY,
+ SKB_FRAG_PAGE_ORDER);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
+ goto done;
+ }
+ }
+ pfrag->page = alloc_page(gfp);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE;
+ goto done;
+ }
+ return false;
+
+done:
+ net->refcnt_bias = USHRT_MAX;
+ page_ref_add(pfrag->page, USHRT_MAX - 1);
+ return true;
+}
+
+#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
+ struct iov_iter *from)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_net *net = container_of(vq->dev, struct vhost_net,
+ dev);
+ struct socket *sock = vhost_vq_get_backend(vq);
+ struct page_frag *alloc_frag = &net->page_frag;
+ struct virtio_net_hdr *gso;
+ struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
+ struct tun_xdp_hdr *hdr;
+ size_t len = iov_iter_count(from);
+ int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
+ int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
+ int sock_hlen = nvq->sock_hlen;
+ void *buf;
+ int copied;
+
+ if (unlikely(len < nvq->sock_hlen))
+ return -EFAULT;
+
+ if (SKB_DATA_ALIGN(len + pad) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+ return -ENOSPC;
+
+ buflen += SKB_DATA_ALIGN(len + pad);
+ alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
+ if (unlikely(!vhost_net_page_frag_refill(net, buflen,
+ alloc_frag, GFP_KERNEL)))
+ return -ENOMEM;
+
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ copied = copy_page_from_iter(alloc_frag->page,
+ alloc_frag->offset +
+ offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen)
+ return -EFAULT;
+
+ hdr = buf;
+ gso = &hdr->gso;
+
+ if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2 >
+ vhost16_to_cpu(vq, gso->hdr_len)) {
+ gso->hdr_len = cpu_to_vhost16(vq,
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2);
+
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len)
+ return -EINVAL;
+ }
+
+ len -= sock_hlen;
+ copied = copy_page_from_iter(alloc_frag->page,
+ alloc_frag->offset + pad,
+ len, from);
+ if (copied != len)
+ return -EFAULT;
+
+ xdp_init_buff(xdp, buflen, NULL);
+ xdp_prepare_buff(xdp, buf, pad, len, true);
+ hdr->buflen = buflen;
+
+ --net->refcnt_bias;
+ alloc_frag->offset += buflen;
+
+ ++nvq->batched_xdp;
+
+ return 0;
+}
+
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
+ int head;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ size_t len, total_len = 0;
+ int err;
+ int sent_pkts = 0;
+ bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
+
+ do {
+ bool busyloop_intr = false;
+
+ if (nvq->done_idx == VHOST_NET_BATCH)
+ vhost_tx_batch(net, nvq, sock, &msg);
+
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ total_len += len;
+
+ /* For simplicity, TX batching is only enabled if
+ * sndbuf is unlimited.
+ */
+ if (sock_can_batch) {
+ err = vhost_net_build_xdp(nvq, &msg.msg_iter);
+ if (!err) {
+ goto done;
+ } else if (unlikely(err != -ENOSPC)) {
+ vhost_tx_batch(net, nvq, sock, &msg);
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+
+ /* We can't build XDP buff, go for single
+ * packet path but let's flush batched
+ * packets.
+ */
+ vhost_tx_batch(net, nvq, sock, &msg);
+ msg.msg_control = NULL;
+ } else {
+ if (tx_can_batch(vq, total_len))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
+ pr_debug("Truncated TX packet: len %d != %zd\n",
+ err, len);
+done:
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
+ ++nvq->done_idx;
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+
+ vhost_tx_batch(net, nvq, sock, &msg);
+}
+
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
+ int head;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ struct tun_msg_ctl ctl;
+ size_t len, total_len = 0;
+ int err;
+ struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info_msgzc *ubuf;
+ bool zcopy_used;
+ int sent_pkts = 0;
+
+ do {
+ bool busyloop_intr;
+
+ /* Release DMAs done buffers first */
+ vhost_zerocopy_signal_used(net, vq);
+
+ busyloop_intr = false;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ zcopy_used = len >= VHOST_GOODCOPY_LEN
+ && !vhost_exceeds_maxpend(net)
+ && vhost_net_tx_select_zcopy(net);
+
+ /* use msg_control to pass vhost zerocopy ubuf info to skb */
+ if (zcopy_used) {
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
+ vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
+ ubuf->ubuf.callback = vhost_zerocopy_callback;
+ ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
+ refcount_set(&ubuf->ubuf.refcnt, 1);
+ msg.msg_control = &ctl;
+ ctl.type = TUN_MSG_UBUF;
+ ctl.ptr = &ubuf->ubuf;
+ msg.msg_controllen = sizeof(ctl);
+ ubufs = nvq->ubufs;
+ atomic_inc(&ubufs->refcount);
+ nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
+ } else {
+ msg.msg_control = NULL;
+ ubufs = NULL;
+ }
+ total_len += len;
+ if (tx_can_batch(vq, total_len) &&
+ likely(!vhost_exceeds_maxpend(net))) {
+ msg.msg_flags |= MSG_MORE;
+ } else {
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
+
+ if (zcopy_used) {
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
+ if (retry)
+ nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ % UIO_MAXIOV;
+ else
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
+ }
+ if (retry) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
+ pr_debug("Truncated TX packet: "
+ " len %d != %zd\n", err, len);
+ if (!zcopy_used)
+ vhost_add_used_and_signal(&net->dev, vq, head, 0);
+ else
+ vhost_zerocopy_signal_used(net, vq);
+ vhost_net_tx_packet(net);
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+}
+
+/* Expects to always be run from the workqueue - which acts as a
+ * read-side critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct socket *sock;
+
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
+ sock = vhost_vq_get_backend(vq);
+ if (!sock)
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
+ if (vhost_sock_zcopy(sock))
+ handle_tx_zerocopy(net, sock);
+ else
+ handle_tx_copy(net, sock);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
+{
+ struct sk_buff *head;
+ int len = 0;
+ unsigned long flags;
+
+ if (rvq->rx_ring)
+ return vhost_net_buf_peek(rvq);
+
+ spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
+ head = skb_peek(&sk->sk_receive_queue);
+ if (likely(head)) {
+ len = head->len;
+ if (skb_vlan_tag_present(head))
+ len += VLAN_HLEN;
+ }
+
+ spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
+ return len;
+}
+
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+ bool *busyloop_intr)
+{
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+ int len = peek_head_len(rnvq, sk);
+
+ if (!len && rvq->busyloop_timeout) {
+ /* Flush batched heads first */
+ vhost_net_signal_used(rnvq);
+ /* Both tx vq and rx socket were polled here */
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
+
+ len = peek_head_len(rnvq, sk);
+ }
+
+ return len;
+}
+
+/* This is a multi-buffer version of vhost_get_vq_desc(), which works if
+ * the vq has read descriptors only.
+ * @vq - the relevant virtqueue
+ * @datalen - data length we'll be reading
+ * @iovcount - returned count of io vectors we fill
+ * @log - vhost log
+ * @log_num - log offset
+ * @quota - headcount quota, 1 for big buffer
+ * returns number of buffer heads allocated, negative on error
+ */
+static int get_rx_bufs(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ int datalen,
+ unsigned *iovcount,
+ struct vhost_log *log,
+ unsigned *log_num,
+ unsigned int quota)
+{
+ unsigned int out, in;
+ int seg = 0;
+ int headcount = 0;
+ unsigned d;
+ int r, nlogs = 0;
+ /* len is always initialized before use since we are always called with
+ * datalen > 0.
+ */
+ u32 len;
+
+ while (datalen > 0 && headcount < quota) {
+ if (unlikely(seg >= UIO_MAXIOV)) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ r = vhost_get_vq_desc(vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
+ if (d == vq->num) {
+ r = 0;
+ goto err;
+ }
+ if (unlikely(out || in <= 0)) {
+ vq_err(vq, "unexpected descriptor format for RX: "
+ "out %d, in %d\n", out, in);
+ r = -EINVAL;
+ goto err;
+ }
+ if (unlikely(log)) {
+ nlogs += *log_num;
+ log += *log_num;
+ }
+ heads[headcount].id = cpu_to_vhost32(vq, d);
+ len = iov_length(vq->iov + seg, in);
+ heads[headcount].len = cpu_to_vhost32(vq, len);
+ datalen -= len;
+ ++headcount;
+ seg += in;
+ }
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
+ *iovcount = seg;
+ if (unlikely(log))
+ *log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
+ return headcount;
+err:
+ vhost_discard_vq_desc(vq, headcount);
+ return r;
+}
+
+/* Expects to always be run from the workqueue - which acts as a
+ * read-side critical section for our kind of RCU. */
+static void handle_rx(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned in, log;
+ struct vhost_log *vq_log;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL, /* FIXME: get and handle RX aux data. */
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+ size_t total_len = 0;
+ int err, mergeable;
+ s16 headcount;
+ size_t vhost_hlen, sock_hlen;
+ size_t vhost_len, sock_len;
+ bool busyloop_intr = false;
+ struct socket *sock;
+ struct iov_iter fixup;
+ __virtio16 num_buffers;
+ int recv_pkts = 0;
+
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
+ sock = vhost_vq_get_backend(vq);
+ if (!sock)
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
+ vhost_hlen = nvq->vhost_hlen;
+ sock_hlen = nvq->sock_hlen;
+
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+ mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
+
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr);
+ if (!sock_len)
+ break;
+ sock_len += sock_hlen;
+ vhost_len = sock_len + vhost_hlen;
+ headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+ vhost_len, &in, vq_log, &log,
+ likely(mergeable) ? UIO_MAXIOV : 1);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ goto out;
+ /* OK, now we need to know about added descriptors. */
+ if (!headcount) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ /* They have slipped one in as we were
+ * doing that: check again. */
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
+ /* Nothing new? Wait for eventfd to tell us
+ * they refilled. */
+ goto out;
+ }
+ busyloop_intr = false;
+ if (nvq->rx_ring)
+ msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
+ err = sock->ops->recvmsg(sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
+ /* We don't need to be notified again. */
+ iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
+ fixup = msg.msg_iter;
+ if (unlikely((vhost_hlen))) {
+ /* We will supply the header ourselves
+ * TODO: support TSO.
+ */
+ iov_iter_advance(&msg.msg_iter, vhost_hlen);
+ }
+ err = sock->ops->recvmsg(sock, &msg,
+ sock_len, MSG_DONTWAIT | MSG_TRUNC);
+ /* Userspace might have consumed the packet meanwhile:
+ * it's not supposed to do this usually, but might be hard
+ * to prevent. Discard data we got (if any) and keep going. */
+ if (unlikely(err != sock_len)) {
+ pr_debug("Discarded rx packet: "
+ " len %d, expected %zd\n", err, sock_len);
+ vhost_discard_vq_desc(vq, headcount);
+ continue;
+ }
+ /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
+ if (unlikely(vhost_hlen)) {
+ if (copy_to_iter(&hdr, sizeof(hdr),
+ &fixup) != sizeof(hdr)) {
+ vq_err(vq, "Unable to write vnet_hdr "
+ "at addr %p\n", vq->iov->iov_base);
+ goto out;
+ }
+ } else {
+ /* Header came from socket; we'll need to patch
+ * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+ */
+ iov_iter_advance(&fixup, sizeof(hdr));
+ }
+ /* TODO: Should check and handle checksum. */
+
+ num_buffers = cpu_to_vhost16(vq, headcount);
+ if (likely(mergeable) &&
+ copy_to_iter(&num_buffers, sizeof num_buffers,
+ &fixup) != sizeof num_buffers) {
+ vq_err(vq, "Failed num_buffers write");
+ vhost_discard_vq_desc(vq, headcount);
+ goto out;
+ }
+ nvq->done_idx += headcount;
+ if (nvq->done_idx > VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
+ if (unlikely(vq_log))
+ vhost_log_write(vq, vq_log, log, vhost_len,
+ vq->iov, in);
+ total_len += vhost_len;
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
+ if (unlikely(busyloop_intr))
+ vhost_poll_queue(&vq->poll);
+ else if (!sock_len)
+ vhost_net_enable_vq(net, vq);
+out:
+ vhost_net_signal_used(nvq);
+ mutex_unlock(&vq->mutex);
+}
+
+static void handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
+ handle_tx(net);
+}
+
+static void handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
+ handle_rx(net);
+}
+
+static void handle_tx_net(struct vhost_work *work)
+{
+ struct vhost_net *net = container_of(work, struct vhost_net,
+ poll[VHOST_NET_VQ_TX].work);
+ handle_tx(net);
+}
+
+static void handle_rx_net(struct vhost_work *work)
+{
+ struct vhost_net *net = container_of(work, struct vhost_net,
+ poll[VHOST_NET_VQ_RX].work);
+ handle_rx(net);
+}
+
+static int vhost_net_open(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+ void **queue;
+ struct xdp_buff *xdp;
+ int i;
+
+ n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!n)
+ return -ENOMEM;
+ vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ kvfree(n);
+ return -ENOMEM;
+ }
+
+ queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
+ GFP_KERNEL);
+ if (!queue) {
+ kfree(vqs);
+ kvfree(n);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+
+ xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
+ if (!xdp) {
+ kfree(vqs);
+ kvfree(n);
+ kfree(queue);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
+
+ dev = &n->dev;
+ vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+ vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+ n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+ n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+ for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
+ n->vqs[i].ubufs = NULL;
+ n->vqs[i].ubuf_info = NULL;
+ n->vqs[i].upend_idx = 0;
+ n->vqs[i].done_idx = 0;
+ n->vqs[i].batched_xdp = 0;
+ n->vqs[i].vhost_hlen = 0;
+ n->vqs[i].sock_hlen = 0;
+ n->vqs[i].rx_ring = NULL;
+ vhost_net_buf_init(&n->vqs[i].rxq);
+ }
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+ UIO_MAXIOV + VHOST_NET_BATCH,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
+ NULL);
+
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
+
+ f->private_data = n;
+ n->page_frag.page = NULL;
+ n->refcnt_bias = 0;
+
+ return 0;
+}
+
+static struct socket *vhost_net_stop_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct socket *sock;
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+
+ mutex_lock(&vq->mutex);
+ sock = vhost_vq_get_backend(vq);
+ vhost_net_disable_vq(n, vq);
+ vhost_vq_set_backend(vq, NULL);
+ vhost_net_buf_unproduce(nvq);
+ nvq->rx_ring = NULL;
+ mutex_unlock(&vq->mutex);
+ return sock;
+}
+
+static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
+ struct socket **rx_sock)
+{
+ *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+ *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
+}
+
+static void vhost_net_flush(struct vhost_net *n)
+{
+ vhost_dev_flush(&n->dev);
+ if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
+ mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ n->tx_flush = true;
+ mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ /* Wait for all lower device DMAs done. */
+ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+ mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ n->tx_flush = false;
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
+ mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
+ }
+}
+
+static int vhost_net_release(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n = f->private_data;
+ struct socket *tx_sock;
+ struct socket *rx_sock;
+
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_cleanup(&n->dev);
+ vhost_net_vq_reset(n);
+ if (tx_sock)
+ sockfd_put(tx_sock);
+ if (rx_sock)
+ sockfd_put(rx_sock);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu();
+ /* We do an extra flush before freeing memory,
+ * since jobs can re-queue themselves. */
+ vhost_net_flush(n);
+ kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
+ kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
+ kfree(n->dev.vqs);
+ if (n->page_frag.page)
+ __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+ kvfree(n);
+ return 0;
+}
+
+static struct socket *get_raw_socket(int fd)
+{
+ int r;
+ struct socket *sock = sockfd_lookup(fd, &r);
+
+ if (!sock)
+ return ERR_PTR(-ENOTSOCK);
+
+ /* Parameter checking */
+ if (sock->sk->sk_type != SOCK_RAW) {
+ r = -ESOCKTNOSUPPORT;
+ goto err;
+ }
+
+ if (sock->sk->sk_family != AF_PACKET) {
+ r = -EPFNOSUPPORT;
+ goto err;
+ }
+ return sock;
+err:
+ sockfd_put(sock);
+ return ERR_PTR(r);
+}
+
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
+{
+ struct ptr_ring *ring;
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
+ goto out;
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
+ goto out;
+ ring = NULL;
+out:
+ return ring;
+}
+
+static struct socket *get_tap_socket(int fd)
+{
+ struct file *file = fget(fd);
+ struct socket *sock;
+
+ if (!file)
+ return ERR_PTR(-EBADF);
+ sock = tun_get_socket(file);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = tap_get_socket(file);
+ if (IS_ERR(sock))
+ fput(file);
+ return sock;
+}
+
+static struct socket *get_socket(int fd)
+{
+ struct socket *sock;
+
+ /* special case to disable backend */
+ if (fd == -1)
+ return NULL;
+ sock = get_raw_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = get_tap_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ return ERR_PTR(-ENOTSOCK);
+}
+
+static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
+{
+ struct socket *sock, *oldsock;
+ struct vhost_virtqueue *vq;
+ struct vhost_net_virtqueue *nvq;
+ struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_NET_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index].vq;
+ nvq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ if (fd == -1)
+ vhost_clear_msg(&n->dev);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ sock = get_socket(fd);
+ if (IS_ERR(sock)) {
+ r = PTR_ERR(sock);
+ goto err_vq;
+ }
+
+ /* start polling new socket */
+ oldsock = vhost_vq_get_backend(vq);
+ if (sock != oldsock) {
+ ubufs = vhost_net_ubuf_alloc(vq,
+ sock && vhost_sock_zcopy(sock));
+ if (IS_ERR(ubufs)) {
+ r = PTR_ERR(ubufs);
+ goto err_ubufs;
+ }
+
+ vhost_net_disable_vq(n, vq);
+ vhost_vq_set_backend(vq, sock);
+ vhost_net_buf_unproduce(nvq);
+ r = vhost_vq_init_access(vq);
+ if (r)
+ goto err_used;
+ r = vhost_net_enable_vq(n, vq);
+ if (r)
+ goto err_used;
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
+
+ oldubufs = nvq->ubufs;
+ nvq->ubufs = ubufs;
+
+ n->tx_packets = 0;
+ n->tx_zcopy_err = 0;
+ n->tx_flush = false;
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (oldubufs) {
+ vhost_net_ubuf_put_wait_and_free(oldubufs);
+ mutex_lock(&vq->mutex);
+ vhost_zerocopy_signal_used(n, vq);
+ mutex_unlock(&vq->mutex);
+ }
+
+ if (oldsock) {
+ vhost_dev_flush(&n->dev);
+ sockfd_put(oldsock);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_used:
+ vhost_vq_set_backend(vq, oldsock);
+ vhost_net_enable_vq(n, vq);
+ if (ubufs)
+ vhost_net_ubuf_put_wait_and_free(ubufs);
+err_ubufs:
+ if (sock)
+ sockfd_put(sock);
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_net_reset_owner(struct vhost_net *n)
+{
+ struct socket *tx_sock = NULL;
+ struct socket *rx_sock = NULL;
+ long err;
+ struct vhost_iotlb *umem;
+
+ mutex_lock(&n->dev.mutex);
+ err = vhost_dev_check_owner(&n->dev);
+ if (err)
+ goto done;
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
+ err = -ENOMEM;
+ goto done;
+ }
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_reset_owner(&n->dev, umem);
+ vhost_net_vq_reset(n);
+done:
+ mutex_unlock(&n->dev.mutex);
+ if (tx_sock)
+ sockfd_put(tx_sock);
+ if (rx_sock)
+ sockfd_put(rx_sock);
+ return err;
+}
+
+static int vhost_net_set_features(struct vhost_net *n, u64 features)
+{
+ size_t vhost_hlen, sock_hlen, hdr_len;
+ int i;
+
+ hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1))) ?
+ sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+ sizeof(struct virtio_net_hdr);
+ if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
+ /* vhost provides vnet_hdr */
+ vhost_hlen = hdr_len;
+ sock_hlen = 0;
+ } else {
+ /* socket provides vnet_hdr */
+ vhost_hlen = 0;
+ sock_hlen = hdr_len;
+ }
+ mutex_lock(&n->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&n->dev))
+ goto out_unlock;
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&n->dev, true))
+ goto out_unlock;
+ }
+
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ mutex_lock(&n->vqs[i].vq.mutex);
+ n->vqs[i].vq.acked_features = features;
+ n->vqs[i].vhost_hlen = vhost_hlen;
+ n->vqs[i].sock_hlen = sock_hlen;
+ mutex_unlock(&n->vqs[i].vq.mutex);
+ }
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
+}
+
+static long vhost_net_set_owner(struct vhost_net *n)
+{
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ if (vhost_dev_has_owner(&n->dev)) {
+ r = -EBUSY;
+ goto out;
+ }
+ r = vhost_net_set_ubuf_info(n);
+ if (r)
+ goto out;
+ r = vhost_dev_set_owner(&n->dev);
+ if (r)
+ vhost_net_clear_ubuf_info(n);
+ vhost_net_flush(n);
+out:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_net *n = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ struct vhost_vring_file backend;
+ u64 features;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_NET_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_net_set_backend(n, backend.index, backend.fd);
+ case VHOST_GET_FEATURES:
+ features = VHOST_NET_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ if (features & ~VHOST_NET_FEATURES)
+ return -EOPNOTSUPP;
+ return vhost_net_set_features(n, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_NET_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_NET_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&n->dev, features);
+ return 0;
+ case VHOST_RESET_OWNER:
+ return vhost_net_reset_owner(n);
+ case VHOST_SET_OWNER:
+ return vhost_net_set_owner(n);
+ default:
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&n->dev, ioctl, argp);
+ else
+ vhost_net_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return r;
+ }
+}
+
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
+static const struct file_operations vhost_net_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_net_release,
+ .read_iter = vhost_net_chr_read_iter,
+ .write_iter = vhost_net_chr_write_iter,
+ .poll = vhost_net_chr_poll,
+ .unlocked_ioctl = vhost_net_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = vhost_net_open,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vhost_net_misc = {
+ .minor = VHOST_NET_MINOR,
+ .name = "vhost-net",
+ .fops = &vhost_net_fops,
+};
+
+static int __init vhost_net_init(void)
+{
+ if (experimental_zcopytx)
+ vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
+ return misc_register(&vhost_net_misc);
+}
+module_init(vhost_net_init);
+
+static void __exit vhost_net_exit(void)
+{
+ misc_deregister(&vhost_net_misc);
+}
+module_exit(vhost_net_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
new file mode 100644
index 000000000..d5ecb8876
--- /dev/null
+++ b/drivers/vhost/scsi.c
@@ -0,0 +1,2527 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*******************************************************************************
+ * Vhost kernel TCM fabric driver for virtio SCSI initiators
+ *
+ * (C) Copyright 2010-2013 Datera, Inc.
+ * (C) Copyright 2010-2012 IBM Corp.
+ *
+ * Authors: Nicholas A. Bellinger <nab@daterainc.com>
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/miscdevice.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <linux/vhost.h>
+#include <linux/virtio_scsi.h>
+#include <linux/llist.h>
+#include <linux/bitmap.h>
+
+#include "vhost.h"
+
+#define VHOST_SCSI_VERSION "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * requests.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
+struct vhost_scsi_inflight {
+ /* Wait for the flush operation to finish */
+ struct completion comp;
+ /* Refcount for the inflight reqs */
+ struct kref kref;
+};
+
+struct vhost_scsi_cmd {
+ /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+ int tvc_vq_desc;
+ /* virtio-scsi initiator task attribute */
+ int tvc_task_attr;
+ /* virtio-scsi response incoming iovecs */
+ int tvc_in_iovs;
+ /* virtio-scsi initiator data direction */
+ enum dma_data_direction tvc_data_direction;
+ /* Expected data transfer length from virtio-scsi header */
+ u32 tvc_exp_data_len;
+ /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+ u64 tvc_tag;
+ /* The number of scatterlists associated with this cmd */
+ u32 tvc_sgl_count;
+ u32 tvc_prot_sgl_count;
+ /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
+ u32 tvc_lun;
+ /* Pointer to the SGL formatted memory from virtio-scsi */
+ struct scatterlist *tvc_sgl;
+ struct scatterlist *tvc_prot_sgl;
+ struct page **tvc_upages;
+ /* Pointer to response header iovec */
+ struct iovec *tvc_resp_iov;
+ /* Pointer to vhost_scsi for our device */
+ struct vhost_scsi *tvc_vhost;
+ /* Pointer to vhost_virtqueue for the cmd */
+ struct vhost_virtqueue *tvc_vq;
+ /* Pointer to vhost nexus memory */
+ struct vhost_scsi_nexus *tvc_nexus;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd tvc_se_cmd;
+ /* Copy of the incoming SCSI command descriptor block (CDB) */
+ unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /* Completed commands list, serviced from vhost worker thread */
+ struct llist_node tvc_completion_list;
+ /* Used to track inflight cmd */
+ struct vhost_scsi_inflight *inflight;
+};
+
+struct vhost_scsi_nexus {
+ /* Pointer to TCM session for I_T Nexus */
+ struct se_session *tvn_se_sess;
+};
+
+struct vhost_scsi_tpg {
+ /* Vhost port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Used to track the number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
+ int tv_tpg_port_count;
+ /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
+ int tv_tpg_vhost_count;
+ /* Used for enabling T10-PI with legacy devices */
+ int tv_fabric_prot_type;
+ /* list for vhost_scsi_list */
+ struct list_head tv_tpg_list;
+ /* Used to protect access for tpg_nexus */
+ struct mutex tv_tpg_mutex;
+ /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+ struct vhost_scsi_nexus *tpg_nexus;
+ /* Pointer back to vhost_scsi_tport */
+ struct vhost_scsi_tport *tport;
+ /* Returned by vhost_scsi_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
+ struct vhost_scsi *vhost_scsi;
+ struct list_head tmf_queue;
+};
+
+struct vhost_scsi_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* Binary World Wide unique Port Name for Vhost Target port */
+ u64 tport_wwpn;
+ /* ASCII formatted WWPN for Vhost Target port */
+ char tport_name[VHOST_SCSI_NAMELEN];
+ /* Returned by vhost_scsi_make_tport() */
+ struct se_wwn tport_wwn;
+};
+
+struct vhost_scsi_evt {
+ /* event to be sent to guest */
+ struct virtio_scsi_event event;
+ /* event list, serviced from vhost worker thread */
+ struct llist_node list;
+};
+
+enum {
+ VHOST_SCSI_VQ_CTL = 0,
+ VHOST_SCSI_VQ_EVT = 1,
+ VHOST_SCSI_VQ_IO = 2,
+};
+
+/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
+enum {
+ VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
+ (1ULL << VIRTIO_SCSI_F_T10_PI)
+};
+
+#define VHOST_SCSI_MAX_TARGET 256
+#define VHOST_SCSI_MAX_IO_VQ 1024
+#define VHOST_SCSI_MAX_EVENT 128
+
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
+struct vhost_scsi_virtqueue {
+ struct vhost_virtqueue vq;
+ /*
+ * Reference counting for inflight reqs, used for flush operation. At
+ * any time, one reference tracks newly submitted commands, while we
+ * wait for the other one to reach 0.
+ */
+ struct vhost_scsi_inflight inflights[2];
+ /*
+ * Indicate current inflight in use, protected by vq->mutex.
+ * Writers must also take dev mutex and flush under it.
+ */
+ int inflight_idx;
+ struct vhost_scsi_cmd *scsi_cmds;
+ struct sbitmap scsi_tags;
+ int max_cmds;
+};
+
+struct vhost_scsi {
+ /* Protected by vhost_scsi->dev.mutex */
+ struct vhost_scsi_tpg **vs_tpg;
+ char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+
+ struct vhost_dev dev;
+ struct vhost_scsi_virtqueue *vqs;
+ unsigned long *compl_bitmap;
+ struct vhost_scsi_inflight **old_inflight;
+
+ struct vhost_work vs_completion_work; /* cmd completion work item */
+ struct llist_head vs_completion_list; /* cmd completion queue */
+
+ struct vhost_work vs_event_work; /* evt injection work item */
+ struct llist_head vs_event_list; /* evt injection queue */
+
+ bool vs_events_missed; /* any missed events, protected by vq->mutex */
+ int vs_events_nr; /* num of pending events, protected by vq->mutex */
+};
+
+struct vhost_scsi_tmf {
+ struct vhost_work vwork;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_scsi *vhost;
+ struct vhost_scsi_virtqueue *svq;
+ struct list_head queue_entry;
+
+ struct se_cmd se_cmd;
+ u8 scsi_resp;
+ struct vhost_scsi_inflight *inflight;
+ struct iovec resp_iov;
+ int in_iovs;
+ int vq_desc;
+};
+
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+ int head;
+ unsigned int out, in;
+ size_t req_size, rsp_size;
+ size_t out_size, in_size;
+ u8 *target, *lunp;
+ void *req;
+ struct iov_iter out_iter;
+};
+
+/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
+
+static void vhost_scsi_done_inflight(struct kref *kref)
+{
+ struct vhost_scsi_inflight *inflight;
+
+ inflight = container_of(kref, struct vhost_scsi_inflight, kref);
+ complete(&inflight->comp);
+}
+
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
+ struct vhost_scsi_inflight *old_inflight[])
+{
+ struct vhost_scsi_inflight *new_inflight;
+ struct vhost_virtqueue *vq;
+ int idx, i;
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+
+ mutex_lock(&vq->mutex);
+
+ /* store old inflight */
+ idx = vs->vqs[i].inflight_idx;
+ if (old_inflight)
+ old_inflight[i] = &vs->vqs[i].inflights[idx];
+
+ /* setup new inflight */
+ vs->vqs[i].inflight_idx = idx ^ 1;
+ new_inflight = &vs->vqs[i].inflights[idx ^ 1];
+ kref_init(&new_inflight->kref);
+ init_completion(&new_inflight->comp);
+
+ mutex_unlock(&vq->mutex);
+ }
+}
+
+static struct vhost_scsi_inflight *
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_inflight *inflight;
+ struct vhost_scsi_virtqueue *svq;
+
+ svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
+ inflight = &svq->inflights[svq->inflight_idx];
+ kref_get(&inflight->kref);
+
+ return inflight;
+}
+
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
+{
+ kref_put(&inflight->kref, vhost_scsi_done_inflight);
+}
+
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return tpg->tv_fabric_prot_type;
+}
+
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
+{
+ struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
+ int i;
+
+ if (tv_cmd->tvc_sgl_count) {
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ }
+ if (tv_cmd->tvc_prot_sgl_count) {
+ for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+ }
+
+ sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
+{
+ struct vhost_scsi_tpg *tpg = tmf->tpg;
+ struct vhost_scsi_inflight *inflight = tmf->inflight;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+ struct vhost_scsi_tmf, se_cmd);
+
+ vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+ } else {
+ struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi *vs = cmd->tvc_vhost;
+
+ llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+ }
+}
+
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
+{
+ /* Go ahead and process the write immediately */
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
+{
+ transport_generic_free_cmd(se_cmd, 0);
+ return 0;
+}
+
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
+{
+ transport_generic_free_cmd(se_cmd, 0);
+ return 0;
+}
+
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+ se_cmd);
+
+ tmf->scsi_resp = se_cmd->se_tmr_req->response;
+ transport_generic_free_cmd(&tmf->se_cmd, 0);
+}
+
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
+}
+
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
+{
+ vs->vs_events_nr--;
+ kfree(evt);
+}
+
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
+ u32 event, u32 reason)
+{
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ struct vhost_scsi_evt *evt;
+
+ if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
+ vs->vs_events_missed = true;
+ return NULL;
+ }
+
+ evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ if (!evt) {
+ vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
+ vs->vs_events_missed = true;
+ return NULL;
+ }
+
+ evt->event.event = cpu_to_vhost32(vq, event);
+ evt->event.reason = cpu_to_vhost32(vq, reason);
+ vs->vs_events_nr++;
+
+ return evt;
+}
+
+static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd);
+}
+
+static void
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
+{
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ struct virtio_scsi_event *event = &evt->event;
+ struct virtio_scsi_event __user *eventp;
+ unsigned out, in;
+ int head, ret;
+
+ if (!vhost_vq_get_backend(vq)) {
+ vs->vs_events_missed = true;
+ return;
+ }
+
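+ /* Loop back here if vhost_enable_notify() below reports that buffers
+ * were added after notification was disabled. */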
+again:
+ vhost_disable_notify(&vs->dev, vq);
+ head = vhost_get_vq_desc(vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &out, &in,
+ NULL, NULL);
+ if (head < 0) {
+ vs->vs_events_missed = true;
+ return;
+ }
+ if (head == vq->num) {
+ if (vhost_enable_notify(&vs->dev, vq))
+ goto again;
+ vs->vs_events_missed = true;
+ return;
+ }
+
+ if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
+ vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
+ vq->iov[out].iov_len);
+ vs->vs_events_missed = true;
+ return;
+ }
+
+ if (vs->vs_events_missed) {
+ event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
+ vs->vs_events_missed = false;
+ }
+
+ eventp = vq->iov[out].iov_base;
+ ret = __copy_to_user(eventp, event, sizeof(*event));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ else
+ vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+}
+
+static void vhost_scsi_evt_work(struct vhost_work *work)
+{
+ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+ vs_event_work);
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ struct vhost_scsi_evt *evt, *t;
+ struct llist_node *llnode;
+
+ mutex_lock(&vq->mutex);
+ llnode = llist_del_all(&vs->vs_event_list);
+ llist_for_each_entry_safe(evt, t, llnode, list) {
+ vhost_scsi_do_evt_work(vs, evt);
+ vhost_scsi_free_evt(vs, evt);
+ }
+ mutex_unlock(&vq->mutex);
+}
+
+/* Fill in status and signal that we are done processing this command
+ *
+ * This is scheduled in the vhost work queue so we are called with the owner
+ * process mm and can access the vring.
+ */
+static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+{
+ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+ vs_completion_work);
+ struct virtio_scsi_cmd_resp v_rsp;
+ struct vhost_scsi_cmd *cmd, *t;
+ struct llist_node *llnode;
+ struct se_cmd *se_cmd;
+ struct iov_iter iov_iter;
+ int ret, vq;
+
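+ /* compl_bitmap records which vqs had completions so that each one is
+ * signalled exactly once at the end. */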
+ bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
+ llnode = llist_del_all(&vs->vs_completion_list);
+ llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
+ se_cmd = &cmd->tvc_se_cmd;
+
+ pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+ cmd, se_cmd->residual_count, se_cmd->scsi_status);
+
+ memset(&v_rsp, 0, sizeof(v_rsp));
+ v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
+ /* TODO is status_qualifier field needed? */
+ v_rsp.status = se_cmd->scsi_status;
+ v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->scsi_sense_length);
+ memcpy(v_rsp.sense, cmd->tvc_sense_buf,
+ se_cmd->scsi_sense_length);
+
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
+ cmd->tvc_in_iovs, sizeof(v_rsp));
+ ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+ if (likely(ret == sizeof(v_rsp))) {
+ struct vhost_scsi_virtqueue *q;
+ vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
+ q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+ vq = q - vs->vqs;
+ __set_bit(vq, vs->compl_bitmap);
+ } else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+ vhost_scsi_release_cmd_res(se_cmd);
+ }
+
+ vq = -1;
+ while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
+ < vs->dev.nvqs)
+ vhost_signal(&vs->dev, &vs->vqs[vq].vq);
+}
+
+static struct vhost_scsi_cmd *
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
+ u32 exp_data_len, int data_direction)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *cmd;
+ struct vhost_scsi_nexus *tv_nexus;
+ struct scatterlist *sg, *prot_sg;
+ struct iovec *tvc_resp_iov;
+ struct page **pages;
+ int tag;
+
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Unable to locate active struct vhost_scsi_nexus\n");
+ return ERR_PTR(-EIO);
+ }
+
+ tag = sbitmap_get(&svq->scsi_tags);
+ if (tag < 0) {
+ pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd = &svq->scsi_cmds[tag];
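+ /* Save the preallocated buffer pointers across the memset below */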
+ sg = cmd->tvc_sgl;
+ prot_sg = cmd->tvc_prot_sgl;
+ pages = cmd->tvc_upages;
+ tvc_resp_iov = cmd->tvc_resp_iov;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->tvc_sgl = sg;
+ cmd->tvc_prot_sgl = prot_sg;
+ cmd->tvc_upages = pages;
+ cmd->tvc_se_cmd.map_tag = tag;
+ cmd->tvc_tag = scsi_tag;
+ cmd->tvc_lun = lun;
+ cmd->tvc_task_attr = task_attr;
+ cmd->tvc_exp_data_len = exp_data_len;
+ cmd->tvc_data_direction = data_direction;
+ cmd->tvc_nexus = tv_nexus;
+ cmd->inflight = vhost_scsi_get_inflight(vq);
+ cmd->tvc_resp_iov = tvc_resp_iov;
+
+ memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
+
+ return cmd;
+}
+
+/*
+ * Map a user memory range into a scatterlist
+ *
+ * Returns the number of scatterlist entries used or -errno on error.
+ */
+static int
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+ struct iov_iter *iter,
+ struct scatterlist *sgl,
+ bool write)
+{
+ struct page **pages = cmd->tvc_upages;
+ struct scatterlist *sg = sgl;
+ ssize_t bytes;
+ size_t offset;
+ unsigned int npages = 0;
+
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
+ VHOST_SCSI_PREALLOC_UPAGES, &offset);
+ /* No pages were pinned */
+ if (bytes <= 0)
+ return bytes < 0 ? bytes : -EFAULT;
+
+ while (bytes) {
+ unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+ sg_set_page(sg++, pages[npages++], n, offset);
+ bytes -= n;
+ offset = 0;
+ }
+ return npages;
+}
+
+static int
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
+{
+ int sgl_count = 0;
+
+ if (!iter || !iter->iov) {
+ pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+ " present\n", __func__, bytes);
+ return -EINVAL;
+ }
+
+ sgl_count = iov_iter_npages(iter, 0xffff);
+ if (sgl_count > max_sgls) {
+ pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+ " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+ return -EINVAL;
+ }
+ return sgl_count;
+}
+
+static int
+vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+ struct iov_iter *iter,
+ struct scatterlist *sg, int sg_count)
+{
+ struct scatterlist *p = sg;
+ int ret;
+
+ while (iov_iter_count(iter)) {
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
+ if (ret < 0) {
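+ /* Unwind: release pages pinned by earlier iterations */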
+ while (p < sg) {
+ struct page *page = sg_page(p++);
+ if (page)
+ put_page(page);
+ }
+ return ret;
+ }
+ sg += ret;
+ }
+ return 0;
+}
+
+static int
+vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+ size_t prot_bytes, struct iov_iter *prot_iter,
+ size_t data_bytes, struct iov_iter *data_iter)
+{
+ int sgl_count, ret;
+ bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
+
+ if (prot_bytes) {
+ sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+ VHOST_SCSI_PREALLOC_PROT_SGLS);
+ if (sgl_count < 0)
+ return sgl_count;
+
+ sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+ cmd->tvc_prot_sgl_count = sgl_count;
+ pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+ cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+
+ ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
+ cmd->tvc_prot_sgl,
+ cmd->tvc_prot_sgl_count);
+ if (ret < 0) {
+ cmd->tvc_prot_sgl_count = 0;
+ return ret;
+ }
+ }
+ sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+ VHOST_SCSI_PREALLOC_SGLS);
+ if (sgl_count < 0)
+ return sgl_count;
+
+ sg_init_table(cmd->tvc_sgl, sgl_count);
+ cmd->tvc_sgl_count = sgl_count;
+ pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+ cmd->tvc_sgl, cmd->tvc_sgl_count);
+
+ ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
+ cmd->tvc_sgl, cmd->tvc_sgl_count);
+ if (ret < 0) {
+ cmd->tvc_sgl_count = 0;
+ return ret;
+ }
+ return 0;
+}
+
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
+ struct vhost_scsi_nexus *tv_nexus;
+ struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
+
+ /* FIXME: BIDI operation */
+ if (cmd->tvc_sgl_count) {
+ sg_ptr = cmd->tvc_sgl;
+
+ if (cmd->tvc_prot_sgl_count)
+ sg_prot_ptr = cmd->tvc_prot_sgl;
+ else
+ se_cmd->prot_pto = true;
+ } else {
+ sg_ptr = NULL;
+ }
+ tv_nexus = cmd->tvc_nexus;
+
+ se_cmd->tag = 0;
+ target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
+ cmd->tvc_lun, cmd->tvc_exp_data_len,
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
+
+ if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
+ cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count, GFP_KERNEL))
+ return;
+
+ target_queue_submission(se_cmd);
+}
+
+static void
+vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ int head, unsigned out)
+{
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+ int ret;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ int ret = -ENXIO;
+
+ vc->head = vhost_get_vq_desc(vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+ NULL, NULL);
+
+ pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+ vc->head, vc->out, vc->in);
+
+ /* On error, stop handling until the next kick. */
+ if (unlikely(vc->head < 0))
+ goto done;
+
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (vc->head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+ vhost_disable_notify(&vs->dev, vq);
+ ret = -EAGAIN;
+ }
+ goto done;
+ }
+
+ /*
+ * Get the size of request and response buffers.
+ * FIXME: Not correct for BIDI operation
+ */
+ vc->out_size = iov_length(vq->iov, vc->out);
+ vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+ /*
+ * Copy over the virtio-scsi request header, which for an
+ * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+ * single iovec may contain both the header + outgoing
+ * WRITE payloads.
+ *
+ * copy_from_iter() will advance out_iter, so that it will
+ * point at the start of the outgoing WRITE payload, if
+ * DMA_TO_DEVICE is set.
+ */
+ iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
+ ret = 0;
+
+done:
+ return ret;
+}
+
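+/*
+ * Returns -EINVAL when the response buffer is too small (the request is
+ * dropped) and -EIO when the request buffer is too small (a BAD_TARGET
+ * response is sent back), matching the error handling in the vq handlers.
+ */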
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+ if (unlikely(vc->in_size < vc->rsp_size)) {
+ vq_err(vq,
+ "Response buf too small, need min %zu bytes got %zu",
+ vc->rsp_size, vc->in_size);
+ return -EINVAL;
+ } else if (unlikely(vc->out_size < vc->req_size)) {
+ vq_err(vq,
+ "Request buf too small, need min %zu bytes got %zu",
+ vc->req_size, vc->out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+ struct vhost_scsi_tpg **tpgp)
+{
+ int ret = -EIO;
+
+ if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+ &vc->out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter_full\n");
+ } else if (unlikely(*vc->lunp != 1)) {
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+ } else {
+ struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+ vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
+
+ tpg = READ_ONCE(vs_tpg[*vc->target]);
+ if (unlikely(!tpg)) {
+ vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+ } else {
+ if (tpgp)
+ *tpgp = tpg;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
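+/*
+ * Extract the 14-bit flat-addressed LUN from bytes 2-3 of the 8-byte
+ * virtio-scsi LUN field (byte 0 is the 0x01 marker, byte 1 the target).
+ */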
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+ return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
+static void
+vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_tpg **vs_tpg, *tpg;
+ struct virtio_scsi_cmd_req v_req;
+ struct virtio_scsi_cmd_req_pi v_req_pi;
+ struct vhost_scsi_ctx vc;
+ struct vhost_scsi_cmd *cmd;
+ struct iov_iter in_iter, prot_iter, data_iter;
+ u64 tag;
+ u32 exp_data_len, data_direction;
+ int ret, prot_bytes, i, c = 0;
+ u16 lun;
+ u8 task_attr;
+ bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
+ void *cdb;
+
+ mutex_lock(&vq->mutex);
+ /*
+ * We can handle the vq only after the endpoint is setup by calling the
+ * VHOST_SCSI_SET_ENDPOINT ioctl.
+ */
+ vs_tpg = vhost_vq_get_backend(vq);
+ if (!vs_tpg)
+ goto out;
+
+ memset(&vc, 0, sizeof(vc));
+ vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+
+ vhost_disable_notify(&vs->dev, vq);
+
+ do {
+ ret = vhost_scsi_get_desc(vs, vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Setup pointers and values based upon different virtio-scsi
+ * request header if T10_PI is enabled in KVM guest.
+ */
+ if (t10_pi) {
+ vc.req = &v_req_pi;
+ vc.req_size = sizeof(v_req_pi);
+ vc.lunp = &v_req_pi.lun[0];
+ vc.target = &v_req_pi.lun[1];
+ } else {
+ vc.req = &v_req;
+ vc.req_size = sizeof(v_req);
+ vc.lunp = &v_req.lun[0];
+ vc.target = &v_req.lun[1];
+ }
+
+ /*
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
+ */
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
+
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
+ if (ret)
+ goto err;
+
+ ret = -EIO; /* bad target on any error from here on */
+
+ /*
+ * Determine data_direction by calculating the total outgoing
+ * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+ * response headers respectively.
+ *
+ * For DMA_TO_DEVICE this is out_iter, which is already pointing
+ * to the right place.
+ *
+ * For DMA_FROM_DEVICE, the iovec will be just past the end
+ * of the virtio-scsi response header in either the same
+ * or immediately following iovec.
+ *
+ * Any associated T10_PI bytes for the outgoing / incoming
+ * payloads are included in calculation of exp_data_len here.
+ */
+ prot_bytes = 0;
+
+ if (vc.out_size > vc.req_size) {
+ data_direction = DMA_TO_DEVICE;
+ exp_data_len = vc.out_size - vc.req_size;
+ data_iter = vc.out_iter;
+ } else if (vc.in_size > vc.rsp_size) {
+ data_direction = DMA_FROM_DEVICE;
+ exp_data_len = vc.in_size - vc.rsp_size;
+
+ iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
+ vc.rsp_size + exp_data_len);
+ iov_iter_advance(&in_iter, vc.rsp_size);
+ data_iter = in_iter;
+ } else {
+ data_direction = DMA_NONE;
+ exp_data_len = 0;
+ }
+ /*
+ * If T10_PI header + payload is present, setup prot_iter values
+ * and recalculate data_iter for vhost_scsi_mapal() mapping to
+ * host scatterlists via iov_iter_get_pages2().
+ */
+ if (t10_pi) {
+ if (v_req_pi.pi_bytesout) {
+ if (data_direction != DMA_TO_DEVICE) {
+ vq_err(vq, "Received non zero pi_bytesout,"
+ " but wrong data_direction\n");
+ goto err;
+ }
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
+ } else if (v_req_pi.pi_bytesin) {
+ if (data_direction != DMA_FROM_DEVICE) {
+ vq_err(vq, "Received non zero pi_bytesin,"
+ " but wrong data_direction\n");
+ goto err;
+ }
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
+ }
+ /*
+ * Set prot_iter to data_iter and truncate it to
+ * prot_bytes, and advance data_iter past any
+ * preceding prot_bytes that may be present.
+ *
+ * Also fix up the exp_data_len to reflect only the
+ * actual data payload length.
+ */
+ if (prot_bytes) {
+ exp_data_len -= prot_bytes;
+ prot_iter = data_iter;
+ iov_iter_truncate(&prot_iter, prot_bytes);
+ iov_iter_advance(&data_iter, prot_bytes);
+ }
+ tag = vhost64_to_cpu(vq, v_req_pi.tag);
+ task_attr = v_req_pi.task_attr;
+ cdb = &v_req_pi.cdb[0];
+ lun = vhost_buf_to_lun(v_req_pi.lun);
+ } else {
+ tag = vhost64_to_cpu(vq, v_req.tag);
+ task_attr = v_req.task_attr;
+ cdb = &v_req.cdb[0];
+ lun = vhost_buf_to_lun(v_req.lun);
+ }
+ /*
+ * Check that the received CDB size does not exceed our
+ * hardcoded max for vhost-scsi, then get a pre-allocated
+ * cmd descriptor for the new virtio-scsi tag.
+ *
+ * TODO what if cdb was too small for varlen cdb header?
+ */
+ if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
+ vq_err(vq, "Received SCSI CDB with command_size: %d that"
+ " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+ scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+ goto err;
+ }
+ cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
+ exp_data_len + prot_bytes,
+ data_direction);
+ if (IS_ERR(cmd)) {
+ vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
+ PTR_ERR(cmd));
+ goto err;
+ }
+ cmd->tvc_vhost = vs;
+ cmd->tvc_vq = vq;
+ for (i = 0; i < vc.in; i++)
+ cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
+ cmd->tvc_in_iovs = vc.in;
+
+ pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+ cmd->tvc_cdb[0], cmd->tvc_lun);
+ pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+ " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
+
+ if (data_direction != DMA_NONE) {
+ if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
+ &prot_iter, exp_data_len,
+ &data_iter))) {
+ vq_err(vq, "Failed to map iov to sgl\n");
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
+ }
+ }
+ /*
+ * Save the descriptor from vhost_get_vq_desc() to be used to
+ * complete the virtio-scsi request in TCM callback context via
+ * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
+ */
+ cmd->tvc_vq_desc = vc.head;
+ vhost_scsi_target_queue_cmd(cmd);
+ ret = 0;
+err:
+ /*
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ */
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO)
+ vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ int in_iovs, int vq_desc, struct iovec *resp_iov,
+ int tmf_resp_code)
+{
+ struct virtio_scsi_ctrl_tmf_resp rsp;
+ struct iov_iter iov_iter;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = tmf_resp_code;
+
+ iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+ struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+ vwork);
+ int resp_code;
+
+ if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
+ resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ else
+ resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+ vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+ tmf->vq_desc, &tmf->resp_iov, resp_code);
+ vhost_scsi_release_tmf_res(tmf);
+}
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+ struct vhost_virtqueue *vq,
+ struct virtio_scsi_ctrl_tmf_req *vtmf,
+ struct vhost_scsi_ctx *vc)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_tmf *tmf;
+
+ if (vhost32_to_cpu(vq, vtmf->subtype) !=
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+ goto send_reject;
+
+ if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+ pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+ goto send_reject;
+ }
+
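+ /*
+ * vhost_scsi_port_link() reserves one TMF descriptor per port link on
+ * tpg->tmf_queue, so no allocation is needed in this path.
+ */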
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (list_empty(&tpg->tmf_queue)) {
+ pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ goto send_reject;
+ }
+
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del_init(&tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ tmf->tpg = tpg;
+ tmf->vhost = vs;
+ tmf->svq = svq;
+ tmf->resp_iov = vq->iov[vc->out];
+ tmf->vq_desc = vc->head;
+ tmf->in_iovs = vc->in;
+ tmf->inflight = vhost_scsi_get_inflight(vq);
+
+ if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+ vhost_buf_to_lun(vtmf->lun), NULL,
+ TMR_LUN_RESET, GFP_KERNEL, 0,
+ TARGET_SCF_ACK_KREF) < 0) {
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
+ }
+
+ return;
+
+send_reject:
+ vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+ VIRTIO_SCSI_S_FUNCTION_REJECTED);
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ struct virtio_scsi_ctrl_an_resp rsp;
+ struct iov_iter iov_iter;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
+ rsp.response = VIRTIO_SCSI_S_OK;
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_tpg *tpg;
+ union {
+ __virtio32 type;
+ struct virtio_scsi_ctrl_an_req an;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ } v_req;
+ struct vhost_scsi_ctx vc;
+ size_t typ_size;
+ int ret, c = 0;
+
+ mutex_lock(&vq->mutex);
+ /*
+ * We can handle the vq only after the endpoint is setup by calling the
+ * VHOST_SCSI_SET_ENDPOINT ioctl.
+ */
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ memset(&vc, 0, sizeof(vc));
+
+ vhost_disable_notify(&vs->dev, vq);
+
+ do {
+ ret = vhost_scsi_get_desc(vs, vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the request type first in order to setup
+ * other parameters dependent on the type.
+ */
+ vc.req = &v_req.type;
+ typ_size = sizeof(v_req.type);
+
+ if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+ &vc.out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+ /*
+ * The size of the response buffer depends on the
+ * request type and must be validated against it.
+ * Since the request type is not known, don't send
+ * a response.
+ */
+ continue;
+ }
+
+ switch (vhost32_to_cpu(vq, v_req.type)) {
+ case VIRTIO_SCSI_T_TMF:
+ vc.req = &v_req.tmf;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ vc.lunp = &v_req.tmf.lun[0];
+ vc.target = &v_req.tmf.lun[1];
+ break;
+ case VIRTIO_SCSI_T_AN_QUERY:
+ case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+ vc.req = &v_req.an;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ vc.lunp = &v_req.an.lun[0];
+ vc.target = NULL;
+ break;
+ default:
+ vq_err(vq, "Unknown control request %d", v_req.type);
+ continue;
+ }
+
+ /*
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
+ */
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the rest of the request now that its size is known.
+ */
+ vc.req += typ_size;
+ vc.req_size -= typ_size;
+
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
+ if (ret)
+ goto err;
+
+ if (v_req.type == VIRTIO_SCSI_T_TMF)
+ vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
+ else
+ vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+ /*
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ */
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO)
+ vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+ pr_debug("%s: The handling func for control queue.\n", __func__);
+ vhost_scsi_ctl_handle_vq(vs, vq);
+}
+
+static void
+vhost_scsi_send_evt(struct vhost_scsi *vs,
+ struct vhost_scsi_tpg *tpg,
+ struct se_lun *lun,
+ u32 event,
+ u32 reason)
+{
+ struct vhost_scsi_evt *evt;
+
+ evt = vhost_scsi_allocate_evt(vs, event, reason);
+ if (!evt)
+ return;
+
+ if (tpg && lun) {
+ /* TODO: share lun setup code with virtio-scsi.ko */
+ /*
+ * Note: evt->event is zeroed when we allocate it and
+ * lun[4-7] need to be zero according to virtio-scsi spec.
+ */
+ evt->event.lun[0] = 0x01;
+ evt->event.lun[1] = tpg->tport_tpgt;
+ if (lun->unpacked_lun >= 256)
+ evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
+ evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+ }
+
+ llist_add(&evt->list, &vs->vs_event_list);
+ vhost_work_queue(&vs->dev, &vs->vs_event_work);
+}
+
+static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+ mutex_lock(&vq->mutex);
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ if (vs->vs_events_missed)
+ vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_handle_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+ vhost_scsi_handle_vq(vs, vq);
+}
+
+/* Callers must hold dev mutex */
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+ int i;
+
+ /* Init new inflight and remember the old inflight */
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
+
+ /*
+ * The inflight->kref was initialized to 1. We decrement it here to
+ * indicate the start of the flush operation so that it will reach 0
+ * when all the reqs are finished.
+ */
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
+
+ /* Flush both the vhost poll and vhost work */
+ vhost_dev_flush(&vs->dev);
+
+ /* Wait for all reqs issued before the flush to be finished */
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
+}
+
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ kfree(tv_cmd->tvc_sgl);
+ kfree(tv_cmd->tvc_prot_sgl);
+ kfree(tv_cmd->tvc_upages);
+ kfree(tv_cmd->tvc_resp_iov);
+ }
+
+ sbitmap_free(&svq->scsi_tags);
+ kfree(svq->scsi_cmds);
+ svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (svq->scsi_cmds)
+ return 0;
+
+ if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+ NUMA_NO_NODE, false, true))
+ return -ENOMEM;
+ svq->max_cmds = max_cmds;
+
+ svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+ if (!svq->scsi_cmds) {
+ sbitmap_free(&svq->scsi_tags);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_upages) {
+ pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
+ sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_resp_iov) {
+ pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_prot_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+ goto out;
+ }
+ }
+ return 0;
+out:
+ vhost_scsi_destroy_vq_cmds(vq);
+ return -ENOMEM;
+}
+
+/*
+ * Called from vhost_scsi_ioctl() context to walk the list of available
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
+ *
+ * The lock nesting rule is:
+ * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ */
+static int
+vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct se_portal_group *se_tpg;
+ struct vhost_scsi_tport *tv_tport;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_scsi_tpg **vs_tpg;
+ struct vhost_virtqueue *vq;
+ int index, ret, i, len;
+ bool match = false;
+
+ mutex_lock(&vhost_scsi_mutex);
+ mutex_lock(&vs->dev.mutex);
+
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+ vs_tpg = kzalloc(len, GFP_KERNEL);
+ if (!vs_tpg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (vs->vs_tpg)
+ memcpy(vs_tpg, vs->vs_tpg, len);
+
+ list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (!tpg->tpg_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ continue;
+ }
+ if (tpg->tv_tpg_vhost_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ continue;
+ }
+ tv_tport = tpg->tport;
+
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ ret = -EEXIST;
+ goto undepend;
+ }
+ /*
+ * In order to ensure individual vhost-scsi configfs
+ * groups cannot be removed while in use by vhost ioctl,
+ * go ahead and take an explicit se_tpg->tpg_group.cg_item
+ * dependency now.
+ */
+ se_tpg = &tpg->se_tpg;
+ ret = target_depend_item(&se_tpg->tpg_group.cg_item);
+ if (ret) {
+ pr_warn("target_depend_item() failed: %d\n", ret);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ goto undepend;
+ }
+ tpg->tv_tpg_vhost_count++;
+ tpg->vhost_scsi = vs;
+ vs_tpg[tpg->tport_tpgt] = tpg;
+ match = true;
+ }
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ }
+
+ if (match) {
+ memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
+ sizeof(vs->vs_vhost_wwpn));
+
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ if (!vhost_vq_is_setup(vq))
+ continue;
+
+ ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
+ if (ret)
+ goto destroy_vq_cmds;
+ }
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, vs_tpg);
+ vhost_vq_init_access(vq);
+ mutex_unlock(&vq->mutex);
+ }
+ ret = 0;
+ } else {
+ ret = -EEXIST;
+ }
+
+ /*
+ * Act as synchronize_rcu to make sure access to
+ * old vs->vs_tpg is finished.
+ */
+ vhost_scsi_flush(vs);
+ kfree(vs->vs_tpg);
+ vs->vs_tpg = vs_tpg;
+ goto out;
+
+destroy_vq_cmds:
+ for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+ if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+ vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+ }
+undepend:
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ tpg = vs_tpg[i];
+ if (tpg) {
+ tpg->tv_tpg_vhost_count--;
+ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ }
+ }
+ kfree(vs_tpg);
+out:
+ mutex_unlock(&vs->dev.mutex);
+ mutex_unlock(&vhost_scsi_mutex);
+ return ret;
+}
+
+static int
+vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct se_portal_group *se_tpg;
+ struct vhost_scsi_tport *tv_tport;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_virtqueue *vq;
+ bool match = false;
+ int index, ret, i;
+ u8 target;
+
+ mutex_lock(&vhost_scsi_mutex);
+ mutex_lock(&vs->dev.mutex);
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
+ ret = -EFAULT;
+ goto err_dev;
+ }
+ }
+
+ if (!vs->vs_tpg) {
+ ret = 0;
+ goto err_dev;
+ }
+
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ target = i;
+ tpg = vs->vs_tpg[target];
+ if (!tpg)
+ continue;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_tport = tpg->tport;
+ if (!tv_tport) {
+ ret = -ENODEV;
+ goto err_tpg;
+ }
+
+ if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
+ " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+ tv_tport->tport_name, tpg->tport_tpgt,
+ t->vhost_wwpn, t->vhost_tpgt);
+ ret = -EINVAL;
+ goto err_tpg;
+ }
+ tpg->tv_tpg_vhost_count--;
+ tpg->vhost_scsi = NULL;
+ vs->vs_tpg[target] = NULL;
+ match = true;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ /*
+ * Release se_tpg->tpg_group.cg_item configfs dependency now
+ * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
+ */
+ se_tpg = &tpg->se_tpg;
+ target_undepend_item(&se_tpg->tpg_group.cg_item);
+ }
+ if (match) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+ /* Make sure cmds are not running before tearing them down. */
+ vhost_scsi_flush(vs);
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ vhost_scsi_destroy_vq_cmds(vq);
+ }
+ }
+ /*
+ * Act as synchronize_rcu to make sure access to
+ * old vs->vs_tpg is finished.
+ */
+ vhost_scsi_flush(vs);
+ kfree(vs->vs_tpg);
+ vs->vs_tpg = NULL;
+ WARN_ON(vs->vs_events_nr);
+ mutex_unlock(&vs->dev.mutex);
+ mutex_unlock(&vhost_scsi_mutex);
+ return 0;
+
+err_tpg:
+ mutex_unlock(&tpg->tv_tpg_mutex);
+err_dev:
+ mutex_unlock(&vs->dev.mutex);
+ mutex_unlock(&vhost_scsi_mutex);
+ return ret;
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_SCSI_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
+static int vhost_scsi_open(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *vs;
+ struct vhost_virtqueue **vqs;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
+
+ vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ goto err_vs;
+
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
+ }
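+ /* Add the control and event virtqueues on top of the I/O vqs */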
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
+ if (!vs->compl_bitmap)
+ goto err_compl_bitmap;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
+ goto err_vqs;
+
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
+ vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
+ vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
+
+ vs->vs_events_nr = 0;
+ vs->vs_events_missed = false;
+
+ vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
+ vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+ vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
+ vqs[i] = &vs->vqs[i].vq;
+ vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+ }
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
+ VHOST_SCSI_WEIGHT, 0, true, NULL);
+
+ vhost_scsi_init_inflight(vs, NULL);
+
+ f->private_data = vs;
+ return 0;
+
+err_local_vqs:
+ kfree(vs->vqs);
+err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ bitmap_free(vs->compl_bitmap);
+err_compl_bitmap:
+ kvfree(vs);
+err_vs:
+ return r;
+}
+
+static int vhost_scsi_release(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *vs = f->private_data;
+ struct vhost_scsi_target t;
+
+ mutex_lock(&vs->dev.mutex);
+ memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
+ mutex_unlock(&vs->dev.mutex);
+ vhost_scsi_clear_endpoint(vs, &t);
+ vhost_dev_stop(&vs->dev);
+ vhost_dev_cleanup(&vs->dev);
+ kfree(vs->dev.vqs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ bitmap_free(vs->compl_bitmap);
+ kvfree(vs);
+ return 0;
+}
+
+static long
+vhost_scsi_ioctl(struct file *f,
+ unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_scsi *vs = f->private_data;
+ struct vhost_scsi_target backend;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ u32 __user *eventsp = argp;
+ u32 events_missed;
+ u64 features;
+ int r, abi_version = VHOST_SCSI_ABI_VERSION;
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+
+ switch (ioctl) {
+ case VHOST_SCSI_SET_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ if (backend.reserved != 0)
+ return -EOPNOTSUPP;
+
+ return vhost_scsi_set_endpoint(vs, &backend);
+ case VHOST_SCSI_CLEAR_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ if (backend.reserved != 0)
+ return -EOPNOTSUPP;
+
+ return vhost_scsi_clear_endpoint(vs, &backend);
+ case VHOST_SCSI_GET_ABI_VERSION:
+ if (copy_to_user(argp, &abi_version, sizeof abi_version))
+ return -EFAULT;
+ return 0;
+ case VHOST_SCSI_SET_EVENTS_MISSED:
+ if (get_user(events_missed, eventsp))
+ return -EFAULT;
+ mutex_lock(&vq->mutex);
+ vs->vs_events_missed = events_missed;
+ mutex_unlock(&vq->mutex);
+ return 0;
+ case VHOST_SCSI_GET_EVENTS_MISSED:
+ mutex_lock(&vq->mutex);
+ events_missed = vs->vs_events_missed;
+ mutex_unlock(&vq->mutex);
+ if (put_user(events_missed, eventsp))
+ return -EFAULT;
+ return 0;
+ case VHOST_GET_FEATURES:
+ features = VHOST_SCSI_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ return vhost_scsi_set_features(vs, features);
+ default:
+ mutex_lock(&vs->dev.mutex);
+ r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
+ /* TODO: flush backend after dev ioctl. */
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
+ mutex_unlock(&vs->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_scsi_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_scsi_release,
+ .unlocked_ioctl = vhost_scsi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = vhost_scsi_open,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vhost_scsi_misc = {
+ MISC_DYNAMIC_MINOR,
+ "vhost-scsi",
+ &vhost_scsi_fops,
+};
+
+static int __init vhost_scsi_register(void)
+{
+ return misc_register(&vhost_scsi_misc);
+}
+
+static void vhost_scsi_deregister(void)
+{
+ misc_deregister(&vhost_scsi_misc);
+}
+
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
+{
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static void
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
+ struct se_lun *lun, bool plug)
+{
+
+ struct vhost_scsi *vs = tpg->vhost_scsi;
+ struct vhost_virtqueue *vq;
+ u32 reason;
+
+ if (!vs)
+ return;
+
+ mutex_lock(&vs->dev.mutex);
+
+ if (plug)
+ reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
+ else
+ reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
+
+ vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ mutex_lock(&vq->mutex);
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
+ vhost_scsi_send_evt(vs, tpg, lun,
+ VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+ mutex_unlock(&vq->mutex);
+ mutex_unlock(&vs->dev.mutex);
+}
+
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
+{
+ vhost_scsi_do_plug(tpg, lun, true);
+}
+
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
+{
+ vhost_scsi_do_plug(tpg, lun, false);
+}
+
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
+
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&tmf->queue_entry);
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
+
+ mutex_lock(&vhost_scsi_mutex);
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_port_count++;
+ list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ vhost_scsi_hotplug(tpg, lun);
+
+ mutex_unlock(&vhost_scsi_mutex);
+
+ return 0;
+}
+
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
+
+ mutex_lock(&vhost_scsi_mutex);
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_port_count--;
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del(&tmf->queue_entry);
+ kfree(tmf);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ vhost_scsi_hotunplug(tpg, lun);
+
+ mutex_unlock(&vhost_scsi_mutex);
+}
+
+static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
+ struct config_item *item, const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tv_fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
+ struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
+}
+
+CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
+
+static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
+ &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
+ NULL,
+};
+
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+ const char *name)
+{
+ struct vhost_scsi_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (tpg->tpg_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_debug("tpg->tpg_nexus already exists\n");
+ return -EEXIST;
+ }
+
+ tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate struct vhost_scsi_nexus\n");
+ return -ENOMEM;
+ }
+ /*
+ * Since we are running in 'demo mode' this call will generate a
+ * struct se_node_acl for the vhost_scsi struct se_portal_group with
+ * the SCSI Initiator port name of the passed configfs group 'name'.
+ */
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+ (unsigned char *)name, tv_nexus, NULL);
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ tpg->tpg_nexus = tv_nexus;
+
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return 0;
+}
+
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct vhost_scsi_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ se_sess = tv_nexus->tvn_se_sess;
+ if (!se_sess) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ if (tpg->tv_tpg_port_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vhost I_T Nexus with"
+ " active TPG port count: %d\n",
+ tpg->tv_tpg_port_count);
+ return -EBUSY;
+ }
+
+ if (tpg->tv_tpg_vhost_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vhost I_T Nexus with"
+ " active TPG vhost count: %d\n",
+ tpg->tv_tpg_vhost_count);
+ return -EBUSY;
+ }
+
+ pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+
+ /*
+ * Release the SCSI I_T Nexus to the emulated vhost Target Port
+ */
+ target_remove_session(se_sess);
+ tpg->tpg_nexus = NULL;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ kfree(tv_nexus);
+ return 0;
+}
+
+static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_nexus *tv_nexus;
+ ssize_t ret;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ return ret;
+}
+
+static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tport *tport_wwn = tpg->tport;
+ unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
+ int ret;
+ /*
+	 * Shut down the active I_T nexus if 'NULL' is passed.
+ */
+ if (!strncmp(page, "NULL", 4)) {
+ ret = vhost_scsi_drop_nexus(tpg);
+ return (!ret) ? count : ret;
+ }
+ /*
+ * Otherwise make sure the passed virtual Initiator port WWN matches
+ * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+ * vhost_scsi_make_nexus().
+ */
+ if (strlen(page) >= VHOST_SCSI_NAMELEN) {
+ pr_err("Emulated NAA Sas Address: %s, exceeds"
+ " max: %d\n", page, VHOST_SCSI_NAMELEN);
+ return -EINVAL;
+ }
+ snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+ pr_err("Passed SAS Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ vhost_scsi_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "fc.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+ pr_err("Passed FCP Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ vhost_scsi_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[3]; /* Skip over "fc." */
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "iqn.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+ pr_err("Passed iSCSI Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ vhost_scsi_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
+ " %s\n", i_port);
+ return -EINVAL;
+ /*
+ * Clear any trailing newline for the NAA WWN
+ */
+check_newline:
+ if (i_port[strlen(i_port)-1] == '\n')
+ i_port[strlen(i_port)-1] = '\0';
+
+ ret = vhost_scsi_make_nexus(tpg, port_ptr);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
+
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+ &vhost_scsi_tpg_attr_nexus,
+ NULL,
+};
+
+static struct se_portal_group *
+vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct vhost_scsi_tport *tport = container_of(wwn,
+ struct vhost_scsi_tport, tport_wwn);
+
+ struct vhost_scsi_tpg *tpg;
+ u16 tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct vhost_scsi_tpg");
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&tpg->tv_tpg_mutex);
+ INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ INIT_LIST_HEAD(&tpg->tmf_queue);
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+
+ ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ mutex_lock(&vhost_scsi_mutex);
+ list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+ mutex_unlock(&vhost_scsi_mutex);
+
+ return &tpg->se_tpg;
+}
+
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ mutex_lock(&vhost_scsi_mutex);
+ list_del(&tpg->tv_tpg_list);
+ mutex_unlock(&vhost_scsi_mutex);
+ /*
+ * Release the virtual I_T Nexus for this vhost TPG
+ */
+ vhost_scsi_drop_nexus(tpg);
+ /*
+ * Deregister the se_tpg from TCM..
+ */
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static struct se_wwn *
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct vhost_scsi_tport *tport;
+ char *ptr;
+ u64 wwpn = 0;
+ int off = 0;
+
+ /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL); */
+
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+ if (!tport) {
+ pr_err("Unable to allocate struct vhost_scsi_tport");
+ return ERR_PTR(-ENOMEM);
+ }
+ tport->tport_wwpn = wwpn;
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+ goto check_len;
+ }
+
+ pr_err("Unable to locate prefix for emulated Target Port:"
+ " %s\n", name);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+
+check_len:
+ if (strlen(name) >= VHOST_SCSI_NAMELEN) {
+ pr_err("Emulated %s Address: %s, exceeds"
+ " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+ VHOST_SCSI_NAMELEN);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+ }
+ snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
+
+ pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
+ " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
+
+ return &tport->tport_wwn;
+}
+
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
+{
+ struct vhost_scsi_tport *tport = container_of(wwn,
+ struct vhost_scsi_tport, tport_wwn);
+
+ pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
+ " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
+ tport->tport_name);
+
+ kfree(tport);
+}
+
+static ssize_t
+vhost_scsi_wwn_version_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
+ "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
+
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+ &vhost_scsi_wwn_attr_version,
+ NULL,
+};
+
+static const struct target_core_fabric_ops vhost_scsi_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "vhost",
+ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
+ .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
+ .tpg_get_tag = vhost_scsi_get_tpgt,
+ .tpg_check_demo_mode = vhost_scsi_check_true,
+ .tpg_check_demo_mode_cache = vhost_scsi_check_true,
+ .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
+ .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+ .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
+ .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
+ .release_cmd = vhost_scsi_release_cmd,
+ .check_stop_free = vhost_scsi_check_stop_free,
+ .sess_get_index = vhost_scsi_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = vhost_scsi_write_pending,
+ .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
+ .get_cmd_state = vhost_scsi_get_cmd_state,
+ .queue_data_in = vhost_scsi_queue_data_in,
+ .queue_status = vhost_scsi_queue_status,
+ .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
+ .aborted_task = vhost_scsi_aborted_task,
+ /*
+ * Setup callers for generic logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = vhost_scsi_make_tport,
+ .fabric_drop_wwn = vhost_scsi_drop_tport,
+ .fabric_make_tpg = vhost_scsi_make_tpg,
+ .fabric_drop_tpg = vhost_scsi_drop_tpg,
+ .fabric_post_link = vhost_scsi_port_link,
+ .fabric_pre_unlink = vhost_scsi_port_unlink,
+
+ .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
+ .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
+ .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
+};
+
+static int __init vhost_scsi_init(void)
+{
+ int ret = -ENOMEM;
+
+ pr_debug("TCM_VHOST fabric module %s on %s/%s"
+ " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
+ utsname()->machine);
+
+ ret = vhost_scsi_register();
+ if (ret < 0)
+ goto out;
+
+ ret = target_register_template(&vhost_scsi_ops);
+ if (ret < 0)
+ goto out_vhost_scsi_deregister;
+
+ return 0;
+
+out_vhost_scsi_deregister:
+ vhost_scsi_deregister();
+out:
+ return ret;
+}
+
+static void vhost_scsi_exit(void)
+{
+ target_unregister_template(&vhost_scsi_ops);
+ vhost_scsi_deregister();
+}
+
+MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
+MODULE_ALIAS("tcm_vhost");
+MODULE_LICENSE("GPL");
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
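The configfs callbacks above (vhost_scsi_make_tport(), vhost_scsi_make_tpg() and the nexus attribute) are normally driven from userspace through the generic target configfs tree, typically via targetcli. As a rough, hypothetical illustration only — the WWN value and the /sys/kernel/config/target path layout are assumptions based on the standard LIO conventions, and backstore creation plus LUN symlinking are omitted — creating a vhost-scsi target could look like this:

    /* Hypothetical example; requires configfs mounted and root privileges. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
            perror(path);
            if (fd >= 0)
                close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }

    int main(void)
    {
        /* Example WWN; must start with "naa.", "fc." or "iqn."
         * (see vhost_scsi_make_tport()). */
        const char *wwn = "/sys/kernel/config/target/vhost/naa.5001405000000001";
        char tpg[192], nexus[224];

        mkdir(wwn, 0755);
        /* The "tpgt_" prefix is parsed by vhost_scsi_make_tpg(). */
        snprintf(tpg, sizeof(tpg), "%s/tpgt_1", wwn);
        mkdir(tpg, 0755);

        /* Writing an initiator WWN creates the I_T nexus. */
        snprintf(nexus, sizeof(nexus), "%s/nexus", tpg);
        return write_str(nexus, "naa.5001405000000001") ? 1 : 0;
    }

Writing "NULL" to the same nexus file later tears the I_T nexus down again, as handled by vhost_scsi_tpg_nexus_store().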
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
new file mode 100644
index 000000000..bc8e7fb1e
--- /dev/null
+++ b/drivers/vhost/test.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.h"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x80000
+
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
+enum {
+ VHOST_TEST_VQ = 0,
+ VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to always be run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+ struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
+ unsigned out, in;
+ int head;
+ size_t len, total_len = 0;
+ void *private;
+
+ mutex_lock(&vq->mutex);
+ private = vhost_vq_get_backend(vq);
+ if (!private) {
+ mutex_unlock(&vq->mutex);
+ return;
+ }
+
+ vhost_disable_notify(&n->dev, vq);
+
+ for (;;) {
+ head = vhost_get_vq_desc(vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in,
+ NULL, NULL);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&n->dev, vq))) {
+ vhost_disable_notify(&n->dev, vq);
+ continue;
+ }
+ break;
+ }
+ if (in) {
+ vq_err(vq, "Unexpected descriptor format for TX: "
+ "out %d, int %d\n", out, in);
+ break;
+ }
+ len = iov_length(vq->iov, out);
+ /* Sanity check */
+ if (!len) {
+ vq_err(vq, "Unexpected 0 len for TX\n");
+ break;
+ }
+ vhost_add_used_and_signal(&n->dev, vq, head, 0);
+ total_len += len;
+ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
+ break;
+ }
+
+ mutex_unlock(&vq->mutex);
+}
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
+
+ handle_vq(n);
+}
+
+static int vhost_test_open(struct inode *inode, struct file *f)
+{
+ struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+
+ if (!n)
+ return -ENOMEM;
+ vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ kfree(n);
+ return -ENOMEM;
+ }
+
+ dev = &n->dev;
+ vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
+ n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
+
+ f->private_data = n;
+
+ return 0;
+}
+
+static void *vhost_test_stop_vq(struct vhost_test *n,
+ struct vhost_virtqueue *vq)
+{
+ void *private;
+
+ mutex_lock(&vq->mutex);
+ private = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ return private;
+}
+
+static void vhost_test_stop(struct vhost_test *n, void **privatep)
+{
+ *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
+}
+
+static void vhost_test_flush(struct vhost_test *n)
+{
+ vhost_dev_flush(&n->dev);
+}
+
+static int vhost_test_release(struct inode *inode, struct file *f)
+{
+ struct vhost_test *n = f->private_data;
+ void *private;
+
+ vhost_test_stop(n, &private);
+ vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_cleanup(&n->dev);
+ kfree(n->dev.vqs);
+ kfree(n);
+ return 0;
+}
+
+static long vhost_test_run(struct vhost_test *n, int test)
+{
+ void *priv, *oldpriv;
+ struct vhost_virtqueue *vq;
+ int r, index;
+
+ if (test < 0 || test > 1)
+ return -EINVAL;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ for (index = 0; index < n->dev.nvqs; ++index) {
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(&n->vqs[index])) {
+ r = -EFAULT;
+ goto err;
+ }
+ }
+
+ for (index = 0; index < n->dev.nvqs; ++index) {
+ vq = n->vqs + index;
+ mutex_lock(&vq->mutex);
+ priv = test ? n : NULL;
+
+ /* start polling new socket */
+ oldpriv = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, priv);
+
+ r = vhost_vq_init_access(&n->vqs[index]);
+
+ mutex_unlock(&vq->mutex);
+
+ if (r)
+ goto err;
+
+ if (oldpriv) {
+ vhost_test_flush(n);
+ }
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_test_reset_owner(struct vhost_test *n)
+{
+ void *priv = NULL;
+ long err;
+ struct vhost_iotlb *umem;
+
+ mutex_lock(&n->dev.mutex);
+ err = vhost_dev_check_owner(&n->dev);
+ if (err)
+ goto done;
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
+ err = -ENOMEM;
+ goto done;
+ }
+ vhost_test_stop(n, &priv);
+ vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_reset_owner(&n->dev, umem);
+done:
+ mutex_unlock(&n->dev.mutex);
+ return err;
+}
+
+static int vhost_test_set_features(struct vhost_test *n, u64 features)
+{
+ struct vhost_virtqueue *vq;
+
+ mutex_lock(&n->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&n->dev)) {
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
+ }
+ vq = &n->vqs[VHOST_TEST_VQ];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+}
+
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *backend;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ backend = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
+ } else {
+ vhost_vq_set_backend(vq, backend);
+ r = vhost_vq_init_access(vq);
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush(n);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vring_file backend;
+ struct vhost_test *n = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ int test;
+ u64 features;
+ int r;
+ switch (ioctl) {
+ case VHOST_TEST_RUN:
+ if (copy_from_user(&test, argp, sizeof test))
+ return -EFAULT;
+ return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
+ case VHOST_GET_FEATURES:
+ features = VHOST_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ printk(KERN_ERR "1\n");
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ printk(KERN_ERR "2\n");
+ if (features & ~VHOST_FEATURES)
+ return -EOPNOTSUPP;
+ printk(KERN_ERR "3\n");
+ return vhost_test_set_features(n, features);
+ case VHOST_RESET_OWNER:
+ return vhost_test_reset_owner(n);
+ default:
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&n->dev, ioctl, argp);
+ vhost_test_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_test_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_test_release,
+ .unlocked_ioctl = vhost_test_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = vhost_test_open,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vhost_test_misc = {
+ MISC_DYNAMIC_MINOR,
+ "vhost-test",
+ &vhost_test_fops,
+};
+module_misc_device(vhost_test_misc);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel side for virtio simulator");
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
new file mode 100644
index 000000000..822bc4bee
--- /dev/null
+++ b/drivers/vhost/test.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VHOST_TEST_H
+#define LINUX_VHOST_TEST_H
+
+/* Start a given test on the virtio null device. 0 stops all tests. */
+#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
+
+#endif
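For orientation, here is a minimal, hypothetical userspace sketch of driving the test device: it assumes the /dev/vhost-test node created by the misc device registration in test.c, repeats the VHOST_TEST_RUN definition because test.h is not an installed uapi header, and leaves out the vring setup (VHOST_SET_VRING_NUM/ADDR/KICK/CALL) that a real client would have to do before the run ioctl can succeed.

    /* Hypothetical example: not part of the kernel tree. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>

    /* test.h is not exported to userspace, so repeat the ioctl number here. */
    #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)

    int main(void)
    {
        int fd = open("/dev/vhost-test", O_RDWR);
        unsigned long long features;
        int test = 1;   /* non-zero starts the test, 0 stops it */

        if (fd < 0) {
            perror("open /dev/vhost-test");
            return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER, NULL))
            perror("VHOST_SET_OWNER");
        if (ioctl(fd, VHOST_GET_FEATURES, &features) == 0)
            printf("features: 0x%llx\n", features);
        /* A real client would now set up the vring and backend. */
        if (ioctl(fd, VHOST_TEST_RUN, &test))
            perror("VHOST_TEST_RUN");
        close(fd);
        return 0;
    }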
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
new file mode 100644
index 000000000..c8374527a
--- /dev/null
+++ b/drivers/vhost/vdpa.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Intel Corporation.
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Author: Tiwei Bie <tiwei.bie@intel.com>
+ * Jason Wang <jasowang@redhat.com>
+ *
+ * Thanks to Michael S. Tsirkin for the valuable comments and
+ * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
+ * their support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/uuid.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <linux/vhost.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_VDPA_BACKEND_FEATURES =
+ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
+};
+
+#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+
+#define VHOST_VDPA_IOTLB_BUCKETS 16
+
+struct vhost_vdpa_as {
+ struct hlist_node hash_link;
+ struct vhost_iotlb iotlb;
+ u32 id;
+};
+
+struct vhost_vdpa {
+ struct vhost_dev vdev;
+ struct iommu_domain *domain;
+ struct vhost_virtqueue *vqs;
+ struct completion completion;
+ struct vdpa_device *vdpa;
+ struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
+ struct device dev;
+ struct cdev cdev;
+ atomic_t opened;
+ u32 nvqs;
+ int virtio_id;
+ int minor;
+ struct eventfd_ctx *config_ctx;
+ int in_batch;
+ struct vdpa_iova_range range;
+ u32 batch_asid;
+};
+
+static DEFINE_IDA(vhost_vdpa_ida);
+
+static dev_t vhost_vdpa_major;
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid);
+
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+	struct vhost_vdpa_as *as = container_of(iotlb,
+						struct vhost_vdpa_as, iotlb);
+ return as->id;
+}
+
+static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ hlist_for_each_entry(as, head, hash_link)
+ if (as->id == asid)
+ return as;
+
+ return NULL;
+}
+
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return NULL;
+
+ return &as->iotlb;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ if (asid_to_as(v, asid))
+ return NULL;
+
+ if (asid >= v->vdpa->nas)
+ return NULL;
+
+ as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
+ return NULL;
+
+ vhost_iotlb_init(&as->iotlb, 0, 0);
+ as->id = asid;
+ hlist_add_head(&as->hash_link, head);
+
+ return as;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+ u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (as)
+ return as;
+
+ return vhost_vdpa_alloc_as(v, asid);
+}
+
+static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return -EINVAL;
+
+ hlist_del(&as->hash_link);
+ vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+ kfree(as);
+
+ return 0;
+}
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+
+ ops->kick_vq(v->vdpa, vq - v->vqs);
+}
+
+static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
+{
+ struct vhost_virtqueue *vq = private;
+ struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
+
+ if (call_ctx)
+ eventfd_signal(call_ctx, 1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vhost_vdpa_config_cb(void *private)
+{
+ struct vhost_vdpa *v = private;
+ struct eventfd_ctx *config_ctx = v->config_ctx;
+
+ if (config_ctx)
+ eventfd_signal(config_ctx, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+ struct vdpa_device *vdpa = v->vdpa;
+ int ret, irq;
+
+ if (!ops->get_vq_irq)
+ return;
+
+ irq = ops->get_vq_irq(vdpa, qid);
+ if (irq < 0)
+ return;
+
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ if (!vq->call_ctx.ctx)
+ return;
+
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ vq->call_ctx.producer.irq = irq;
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ if (unlikely(ret))
+ dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
+ qid, vq->call_ctx.producer.token, ret);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+}
+
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ v->in_batch = 0;
+
+ return vdpa_reset(vdpa);
+}
+
+static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 device_id;
+
+ device_id = ops->get_device_id(vdpa);
+
+ if (copy_to_user(argp, &device_id, sizeof(device_id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ status = ops->get_status(vdpa);
+
+ if (copy_to_user(statusp, &status, sizeof(status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status, status_old;
+ u32 nvqs = v->nvqs;
+ int ret;
+ u16 i;
+
+ if (copy_from_user(&status, statusp, sizeof(status)))
+ return -EFAULT;
+
+ status_old = ops->get_status(vdpa);
+
+ /*
+	 * Userspace shouldn't remove status bits unless it resets the
+	 * status to 0.
+ */
+ if (status != 0 && (status_old & ~status) != 0)
+ return -EINVAL;
+
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+
+ if (status == 0) {
+ ret = vdpa_reset(vdpa);
+ if (ret)
+ return ret;
+ } else
+ vdpa_set_status(vdpa, status);
+
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_setup_vq_irq(v, i);
+
+ return 0;
+}
+
+static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
+ struct vhost_vdpa_config *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ size_t size = vdpa->config->get_config_size(vdpa);
+
+ if (c->len == 0 || c->off > size)
+ return -EINVAL;
+
+ if (c->len > size - c->off)
+ return -E2BIG;
+
+ return 0;
+}
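Note that the length check is written as c->len > size - c->off (with c->off > size rejected first) rather than c->off + c->len > size: with 32-bit off/len fields the naive sum could wrap around and wrongly accept an oversized request. A standalone illustration with hypothetical 32-bit values:

    /* Illustration only, not kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe variant: off is checked against size before the subtraction. */
    static int range_ok(uint32_t off, uint32_t len, uint32_t size)
    {
        return len != 0 && off <= size && len <= size - off;
    }

    int main(void)
    {
        /* off + len would wrap to a small value under the naive check. */
        printf("%d\n", range_ok(16, UINT32_MAX - 8, 4096)); /* 0: rejected */
        printf("%d\n", range_ok(16, 32, 4096));             /* 1: accepted */
        return 0;
    }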
+
+static long vhost_vdpa_get_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ vdpa_get_config(vdpa, config.off, buf, config.len);
+
+ if (copy_to_user(c->buf, buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ kvfree(buf);
+ return 0;
+}
+
+static long vhost_vdpa_set_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+
+ buf = vmemdup_user(c->buf, config.len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ vdpa_set_config(vdpa, config.off, buf, config.len);
+
+ kvfree(buf);
+ return 0;
+}
+
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
+static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ features = ops->get_device_features(vdpa);
+
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_dev *d = &v->vdev;
+ u64 actual_features;
+ u64 features;
+ int i;
+
+ /*
+ * It's not allowed to change the features after they have
+ * been negotiated.
+ */
+ if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
+ return -EBUSY;
+
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+
+ if (vdpa_set_features(vdpa, features))
+ return -EINVAL;
+
+ /* let the vqs know what has been configured */
+ actual_features = ops->get_driver_features(vdpa);
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->acked_features = actual_features;
+ mutex_unlock(&vq->mutex);
+ }
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u16 num;
+
+ num = ops->get_vq_num_max(vdpa);
+
+ if (copy_to_user(argp, &num, sizeof(num)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+{
+ if (v->config_ctx) {
+ eventfd_ctx_put(v->config_ctx);
+ v->config_ctx = NULL;
+ }
+}
+
+static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_callback cb;
+ int fd;
+ struct eventfd_ctx *ctx;
+
+ cb.callback = vhost_vdpa_config_cb;
+ cb.private = v;
+ if (copy_from_user(&fd, argp, sizeof(fd)))
+ return -EFAULT;
+
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+ swap(ctx, v->config_ctx);
+
+ if (!IS_ERR_OR_NULL(ctx))
+ eventfd_ctx_put(ctx);
+
+ if (IS_ERR(v->config_ctx)) {
+ long ret = PTR_ERR(v->config_ctx);
+
+ v->config_ctx = NULL;
+ return ret;
+ }
+
+ v->vdpa->config->set_config_cb(v->vdpa, &cb);
+
+ return 0;
+}
+
+static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vhost_vdpa_iova_range range = {
+ .first = v->range.first,
+ .last = v->range.last,
+ };
+
+ if (copy_to_user(argp, &range, sizeof(range)))
+ return -EFAULT;
+ return 0;
+}
+
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 size;
+
+ size = ops->get_config_size(vdpa);
+
+ if (copy_to_user(argp, &size, sizeof(size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* After a successful return of this ioctl the device must not process more
+ * virtqueue descriptors. The device can still answer reads or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ops->suspend(vdpa);
+}
+
+static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ void __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_vq_state vq_state;
+ struct vdpa_callback cb;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, (u32 __user *)argp);
+ if (r < 0)
+ return r;
+
+ if (idx >= v->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, v->nvqs);
+ vq = &v->vqs[idx];
+
+ switch (cmd) {
+ case VHOST_VDPA_SET_VRING_ENABLE:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ ops->set_vq_ready(vdpa, idx, s.num);
+ return 0;
+ case VHOST_VDPA_GET_VRING_GROUP:
+ if (!ops->get_vq_group)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_VDPA_SET_GROUP_ASID:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ if (s.num >= vdpa->nas)
+ return -EINVAL;
+ if (!ops->set_group_asid)
+ return -EOPNOTSUPP;
+ return ops->set_group_asid(vdpa, idx, s.num);
+ case VHOST_GET_VRING_BASE:
+ r = ops->get_vq_state(v->vdpa, idx, &vq_state);
+ if (r)
+ return r;
+
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = vq_state.packed.last_avail_idx |
+ (vq_state.packed.last_avail_counter << 15);
+ vq->last_used_idx = vq_state.packed.last_used_idx |
+ (vq_state.packed.last_used_counter << 15);
+ } else {
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
+ break;
+ }
+
+ r = vhost_vring_ioctl(&v->vdev, cmd, argp);
+ if (r)
+ return r;
+
+ switch (cmd) {
+ case VHOST_SET_VRING_ADDR:
+ if (ops->set_vq_address(vdpa, idx,
+ (u64)(uintptr_t)vq->desc,
+ (u64)(uintptr_t)vq->avail,
+ (u64)(uintptr_t)vq->used))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
+ vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
+ vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
+ vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
+ } else {
+ vq_state.split.avail_index = vq->last_avail_idx;
+ }
+ r = ops->set_vq_state(vdpa, idx, &vq_state);
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ cb.callback = vhost_vdpa_virtqueue_cb;
+ cb.private = vq;
+ } else {
+ cb.callback = NULL;
+ cb.private = NULL;
+ }
+ ops->set_vq_cb(vdpa, idx, &cb);
+ vhost_vdpa_setup_vq_irq(v, idx);
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ ops->set_vq_num(vdpa, idx, vq->num);
+ break;
+ }
+
+ return r;
+}
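The VHOST_GET_VRING_BASE/VHOST_SET_VRING_BASE handling above folds the packed-virtqueue state into a single 16-bit value: bits 0-14 carry the last avail/used index and bit 15 carries the wrap counter. A small standalone sketch of that encoding (illustration only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the bit layout used above: bits 0-14 index, bit 15 wrap counter. */
    static uint16_t pack_last_idx(uint16_t idx, int wrap_counter)
    {
        return (idx & 0x7fff) | ((uint16_t)(!!wrap_counter) << 15);
    }

    static void unpack_last_idx(uint16_t packed, uint16_t *idx, int *wrap_counter)
    {
        *idx = packed & 0x7fff;
        *wrap_counter = !!(packed & 0x8000);
    }

    int main(void)
    {
        uint16_t idx;
        int wrap;

        unpack_last_idx(pack_last_idx(42, 1), &idx, &wrap);
        printf("idx=%u wrap=%d\n", idx, wrap); /* idx=42 wrap=1 */
        return 0;
    }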
+
+static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ u64 features;
+ long r = 0;
+
+ if (cmd == VHOST_SET_BACKEND_FEATURES) {
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&v->vdev, features);
+ return 0;
+ }
+
+ mutex_lock(&d->mutex);
+
+ switch (cmd) {
+ case VHOST_VDPA_GET_DEVICE_ID:
+ r = vhost_vdpa_get_device_id(v, argp);
+ break;
+ case VHOST_VDPA_GET_STATUS:
+ r = vhost_vdpa_get_status(v, argp);
+ break;
+ case VHOST_VDPA_SET_STATUS:
+ r = vhost_vdpa_set_status(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG:
+ r = vhost_vdpa_get_config(v, argp);
+ break;
+ case VHOST_VDPA_SET_CONFIG:
+ r = vhost_vdpa_set_config(v, argp);
+ break;
+ case VHOST_GET_FEATURES:
+ r = vhost_vdpa_get_features(v, argp);
+ break;
+ case VHOST_SET_FEATURES:
+ r = vhost_vdpa_set_features(v, argp);
+ break;
+ case VHOST_VDPA_GET_VRING_NUM:
+ r = vhost_vdpa_get_vring_num(v, argp);
+ break;
+ case VHOST_VDPA_GET_GROUP_NUM:
+ if (copy_to_user(argp, &v->vdpa->ngroups,
+ sizeof(v->vdpa->ngroups)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_AS_NUM:
+ if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
+ r = -EFAULT;
+ break;
+ case VHOST_SET_LOG_BASE:
+ case VHOST_SET_LOG_FD:
+ r = -ENOIOCTLCMD;
+ break;
+ case VHOST_VDPA_SET_CONFIG_CALL:
+ r = vhost_vdpa_set_config_call(v, argp);
+ break;
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_IOVA_RANGE:
+ r = vhost_vdpa_get_iova_range(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG_SIZE:
+ r = vhost_vdpa_get_config_size(v, argp);
+ break;
+ case VHOST_VDPA_GET_VQS_COUNT:
+ r = vhost_vdpa_get_vqs_count(v, argp);
+ break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
+ default:
+ r = vhost_dev_ioctl(&v->vdev, cmd, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vdpa_vring_ioctl(v, cmd, argp);
+ break;
+ }
+
+ mutex_unlock(&d->mutex);
+ return r;
+}
+
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb_map *map, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ if (ops->dma_map) {
+ ops->dma_unmap(vdpa, asid, map->start, map->size);
+ } else if (ops->set_map == NULL) {
+ iommu_unmap(v->domain, map->start, map->size);
+ }
+}
+
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb_map *map;
+ struct page *page;
+ unsigned long pfn, pinned;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
+ pinned > 0; pfn++, pinned--) {
+ page = pfn_to_page(pfn);
+ if (map->perm & VHOST_ACCESS_WO)
+ set_page_dirty_lock(page);
+ unpin_user_page(page);
+ }
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+ vhost_vdpa_general_unmap(v, map, asid);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
+{
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_vdpa_general_unmap(v, map, asid);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
+
+ return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
+}
+
+static int perm_to_iommu_flags(u32 perm)
+{
+ int flags = 0;
+
+ switch (perm) {
+ case VHOST_ACCESS_WO:
+ flags |= IOMMU_WRITE;
+ break;
+ case VHOST_ACCESS_RO:
+ flags |= IOMMU_READ;
+ break;
+ case VHOST_ACCESS_RW:
+ flags |= (IOMMU_WRITE | IOMMU_READ);
+ break;
+ default:
+ WARN(1, "invalidate vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags | IOMMU_CACHE;
+}
+
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
+ int r = 0;
+
+ r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
+ pa, perm, opaque);
+ if (r)
+ return r;
+
+ if (ops->dma_map) {
+ r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ r = ops->set_map(vdpa, asid, iotlb);
+ } else {
+ r = iommu_map(v->domain, iova, pa, size,
+ perm_to_iommu_flags(perm));
+ }
+ if (r) {
+ vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
+ return r;
+ }
+
+ if (!vdpa->use_va)
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+
+ return 0;
+}
+
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
+
+ vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
+
+ if (ops->set_map) {
+ if (!v->in_batch)
+ ops->set_map(vdpa, asid, iotlb);
+ }
+
+}
+
+static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ u64 offset, map_size, map_iova = iova;
+ struct vdpa_map_file *map_file;
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ mmap_read_lock(dev->mm);
+
+ while (size) {
+ vma = find_vma(dev->mm, uaddr);
+ if (!vma) {
+ ret = -EINVAL;
+ break;
+ }
+ map_size = min(size, vma->vm_end - uaddr);
+ if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
+ !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
+ goto next;
+
+ map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
+ if (!map_file) {
+ ret = -ENOMEM;
+ break;
+ }
+ offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
+ map_file->offset = offset;
+ map_file->file = get_file(vma->vm_file);
+ ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
+ perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ break;
+ }
+next:
+ size -= map_size;
+ uaddr += map_size;
+ map_iova += map_size;
+ }
+ if (ret)
+ vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
+
+ mmap_read_unlock(dev->mm);
+
+ return ret;
+}
+
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct page **page_list;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ unsigned int gup_flags = FOLL_LONGTERM;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long lock_limit, sz2pin, nchunks, i;
+ u64 start = iova;
+ long pinned;
+ int ret = 0;
+
+ /* Limit the use of memory for bookkeeping */
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ if (perm & VHOST_ACCESS_WO)
+ gup_flags |= FOLL_WRITE;
+
+ npages = PFN_UP(size + (iova & ~PAGE_MASK));
+ if (!npages) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ mmap_read_lock(dev->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ cur_base = uaddr & PAGE_MASK;
+ iova &= PAGE_MASK;
+ nchunks = 0;
+
+ while (npages) {
+ sz2pin = min_t(unsigned long, npages, list_size);
+ pinned = pin_user_pages(cur_base, sz2pin,
+ gup_flags, page_list, NULL);
+ if (sz2pin != pinned) {
+ if (pinned < 0) {
+ ret = pinned;
+ } else {
+ unpin_user_pages(page_list, pinned);
+ ret = -ENOMEM;
+ }
+ goto out;
+ }
+ nchunks++;
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < pinned; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+ /* Pin a contiguous chunk of memory */
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
+ ret = vhost_vdpa_map(v, iotlb, iova, csize,
+ PFN_PHYS(map_pfn),
+ perm, NULL);
+ if (ret) {
+ /*
+				 * Unpin the pages that are left unmapped
+				 * from this point on in the current
+				 * page_list. The remaining outstanding
+				 * pages, which may span several chunks,
+				 * are covered by the common error path
+				 * below.
+ */
+ unpin_user_pages(&page_list[i],
+ pinned - i);
+ goto out;
+ }
+
+ map_pfn = this_pfn;
+ iova += csize;
+ nchunks = 0;
+ }
+
+ last_pfn = this_pfn;
+ }
+
+ cur_base += PFN_PHYS(pinned);
+ npages -= pinned;
+ }
+
+	/* Pin the remaining chunk */
+ ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), perm, NULL);
+out:
+ if (ret) {
+ if (nchunks) {
+ unsigned long pfn;
+
+ /*
+			 * Unpin the outstanding pages that were pinned but
+			 * not yet mapped, because vhost_vdpa_map() or
+			 * pin_user_pages() failed.
+			 *
+			 * Mapped pages are accounted for in vhost_vdpa_map(),
+			 * so the corresponding unpinning is handled by
+			 * vhost_vdpa_unmap().
+ */
+ WARN_ON(!last_pfn);
+ for (pfn = map_pfn; pfn <= last_pfn; pfn++)
+ unpin_user_page(pfn_to_page(pfn));
+ }
+ vhost_vdpa_unmap(v, iotlb, start, size);
+ }
+unlock:
+ mmap_read_unlock(dev->mm);
+free:
+ free_page((unsigned long)page_list);
+ return ret;
+
+}
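vhost_vdpa_pa_map() coalesces the pinned pages into physically contiguous runs (the map_pfn/last_pfn bookkeeping) so that each run becomes a single mapping call. A toy userspace illustration of the same run detection, on made-up pfn values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: print one "mapping" per contiguous pfn run. */
    static void map_contiguous_runs(const uint64_t *pfn, size_t n)
    {
        uint64_t run_start = pfn[0];
        size_t run_len = 1, i;

        for (i = 1; i <= n; i++) {
            if (i < n && pfn[i] == pfn[i - 1] + 1) {
                run_len++;
                continue;
            }
            /* One vhost_vdpa_map() call would cover this run. */
            printf("map pfn %llu..%llu (%zu pages)\n",
                   (unsigned long long)run_start,
                   (unsigned long long)(run_start + run_len - 1), run_len);
            if (i < n) {
                run_start = pfn[i];
                run_len = 1;
            }
        }
    }

    int main(void)
    {
        const uint64_t pfn[] = { 100, 101, 102, 200, 201, 500 };

        map_contiguous_runs(pfn, sizeof(pfn) / sizeof(pfn[0]));
        return 0;
    }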
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
+ msg->uaddr, msg->perm);
+
+ return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
+ msg->perm);
+}
+
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_iotlb *iotlb = NULL;
+ struct vhost_vdpa_as *as = NULL;
+ int r = 0;
+
+ mutex_lock(&dev->mutex);
+
+ r = vhost_dev_check_owner(dev);
+ if (r)
+ goto unlock;
+
+ if (msg->type == VHOST_IOTLB_UPDATE ||
+ msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+ as = vhost_vdpa_find_alloc_as(v, asid);
+ if (!as) {
+ dev_err(&v->dev, "can't find and alloc asid %d\n",
+ asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+ iotlb = &as->iotlb;
+ } else
+ iotlb = asid_to_iotlb(v, asid);
+
+ if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+ if (v->in_batch && v->batch_asid != asid) {
+ dev_info(&v->dev, "batch id %d asid %d\n",
+ v->batch_asid, asid);
+ }
+ if (!iotlb)
+ dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
+ break;
+ case VHOST_IOTLB_BATCH_BEGIN:
+ v->batch_asid = asid;
+ v->in_batch = true;
+ break;
+ case VHOST_IOTLB_BATCH_END:
+ if (v->in_batch && ops->set_map)
+ ops->set_map(vdpa, asid, iotlb);
+ v->in_batch = false;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+unlock:
+ mutex_unlock(&dev->mutex);
+
+ return r;
+}
+
+static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vdpa *v = file->private_data;
+ struct vhost_dev *dev = &v->vdev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ struct bus_type *bus;
+ int ret;
+
+	/* Device wants to do DMA by itself */
+ if (ops->set_map || ops->dma_map)
+ return 0;
+
+ bus = dma_dev->bus;
+ if (!bus)
+ return -EFAULT;
+
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
+ return -ENOTSUPP;
+
+ v->domain = iommu_domain_alloc(bus);
+ if (!v->domain)
+ return -EIO;
+
+ ret = iommu_attach_device(v->domain, dma_dev);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ iommu_domain_free(v->domain);
+ v->domain = NULL;
+ return ret;
+}
+
+static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+
+ if (v->domain) {
+ iommu_detach_device(v->domain, dma_dev);
+ iommu_domain_free(v->domain);
+ }
+
+ v->domain = NULL;
+}
+
+static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
+{
+ struct vdpa_iova_range *range = &v->range;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_iova_range) {
+ *range = ops->get_iova_range(vdpa);
+ } else if (v->domain && v->domain->geometry.force_aperture) {
+ range->first = v->domain->geometry.aperture_start;
+ range->last = v->domain->geometry.aperture_end;
+ } else {
+ range->first = 0;
+ range->last = ULLONG_MAX;
+ }
+}
+
+static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+{
+ struct vhost_vdpa_as *as;
+ u32 asid;
+
+ for (asid = 0; asid < v->vdpa->nas; asid++) {
+ as = asid_to_as(v, asid);
+ if (as)
+ vhost_vdpa_remove_as(v, asid);
+ }
+
+ vhost_vdpa_free_domain(v);
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+}
+
+static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+ int r, opened;
+ u32 i, nvqs;
+
+ v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
+
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (opened)
+ return -EBUSY;
+
+ nvqs = v->nvqs;
+ r = vhost_vdpa_reset(v);
+ if (r)
+ goto err;
+
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ dev = &v->vdev;
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = &v->vqs[i];
+ vqs[i]->handle_kick = handle_vq_kick;
+ }
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
+ vhost_vdpa_process_iotlb_msg);
+
+ r = vhost_vdpa_alloc_domain(v);
+ if (r)
+ goto err_alloc_domain;
+
+ vhost_vdpa_set_iova_range(v);
+
+ filep->private_data = v;
+
+ return 0;
+
+err_alloc_domain:
+ vhost_vdpa_cleanup(v);
+err:
+ atomic_dec(&v->opened);
+ return r;
+}
+
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+ u32 i;
+
+ for (i = 0; i < v->nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+}
+
+static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+
+ mutex_lock(&d->mutex);
+ filep->private_data = NULL;
+ vhost_vdpa_clean_irq(v);
+ vhost_vdpa_reset(v);
+ vhost_dev_stop(&v->vdev);
+ vhost_vdpa_config_put(v);
+ vhost_vdpa_cleanup(v);
+ mutex_unlock(&d->mutex);
+
+ atomic_dec(&v->opened);
+ complete(&v->completion);
+
+ return 0;
+}
+
+#ifdef CONFIG_MMU
+static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+{
+ struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ struct vm_area_struct *vma = vmf->vma;
+ u16 index = vma->vm_pgoff;
+
+ notify = ops->get_vq_notification(vdpa, index);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
+ vma->vm_page_prot))
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+ .fault = vhost_vdpa_fault,
+};
+
+static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vhost_vdpa *v = vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ unsigned long index = vma->vm_pgoff;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (vma->vm_flags & VM_READ)
+ return -EINVAL;
+ if (index > 65535)
+ return -EINVAL;
+ if (!ops->get_vq_notification)
+ return -ENOTSUPP;
+
+	/* To be safe and easily modelled by userspace, we only
+	 * support a doorbell that sits on a page boundary and
+	 * does not share the page with other registers.
+ */
+ notify = ops->get_vq_notification(vdpa, index);
+ if (notify.addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+ if (vma->vm_end - vma->vm_start != notify.size)
+ return -ENOTSUPP;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vhost_vdpa_vm_ops;
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
+static const struct file_operations vhost_vdpa_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vdpa_open,
+ .release = vhost_vdpa_release,
+ .write_iter = vhost_vdpa_chr_write_iter,
+ .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+#ifdef CONFIG_MMU
+ .mmap = vhost_vdpa_mmap,
+#endif /* CONFIG_MMU */
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void vhost_vdpa_release_dev(struct device *device)
+{
+ struct vhost_vdpa *v =
+ container_of(device, struct vhost_vdpa, dev);
+
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ kfree(v->vqs);
+ kfree(v);
+}
+
+static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa *v;
+ int minor;
+ int i, r;
+
+	/* We can't support a platform IOMMU device with more than one
+	 * group or address space (AS).
+ */
+ if (!ops->set_map && !ops->dma_map &&
+ (vdpa->ngroups > 1 || vdpa->nas > 1))
+ return -EOPNOTSUPP;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!v)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&vhost_vdpa_ida, 0,
+ VHOST_VDPA_DEV_MAX, GFP_KERNEL);
+ if (minor < 0) {
+ kfree(v);
+ return minor;
+ }
+
+ atomic_set(&v->opened, 0);
+ v->minor = minor;
+ v->vdpa = vdpa;
+ v->nvqs = vdpa->nvqs;
+ v->virtio_id = ops->get_device_id(vdpa);
+
+ device_initialize(&v->dev);
+ v->dev.release = vhost_vdpa_release_dev;
+ v->dev.parent = &vdpa->dev;
+ v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
+ v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
+ GFP_KERNEL);
+ if (!v->vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
+ if (r)
+ goto err;
+
+ cdev_init(&v->cdev, &vhost_vdpa_fops);
+ v->cdev.owner = THIS_MODULE;
+
+ r = cdev_device_add(&v->cdev, &v->dev);
+ if (r)
+ goto err;
+
+ init_completion(&v->completion);
+ vdpa_set_drvdata(vdpa, v);
+
+ for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
+ INIT_HLIST_HEAD(&v->as[i]);
+
+ return 0;
+
+err:
+ put_device(&v->dev);
+ return r;
+}
+
+static void vhost_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
+ int opened;
+
+ cdev_device_del(&v->cdev, &v->dev);
+
+ do {
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (!opened)
+ break;
+ wait_for_completion(&v->completion);
+ } while (1);
+
+ put_device(&v->dev);
+}
+
+static struct vdpa_driver vhost_vdpa_driver = {
+ .driver = {
+ .name = "vhost_vdpa",
+ },
+ .probe = vhost_vdpa_probe,
+ .remove = vhost_vdpa_remove,
+};
+
+static int __init vhost_vdpa_init(void)
+{
+ int r;
+
+ r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
+ "vhost-vdpa");
+ if (r)
+ goto err_alloc_chrdev;
+
+ r = vdpa_register_driver(&vhost_vdpa_driver);
+ if (r)
+ goto err_vdpa_register_driver;
+
+ return 0;
+
+err_vdpa_register_driver:
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+err_alloc_chrdev:
+ return r;
+}
+module_init(vhost_vdpa_init);
+
+static void __exit vhost_vdpa_exit(void)
+{
+ vdpa_unregister_driver(&vhost_vdpa_driver);
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+}
+module_exit(vhost_vdpa_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
new file mode 100644
index 000000000..2eea08029
--- /dev/null
+++ b/drivers/vhost/vhost.c
@@ -0,0 +1,2662 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
+ *
+ * Generic code for virtio server in host kernel.
+ */
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/kthread.h>
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/nospec.h>
+#include <linux/kcov.h>
+
+#include "vhost.h"
+
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. (default: 2048)");
+
+enum {
+ VHOST_MEMORY_F_LOG = 0x1,
+};
+
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
+{
+ vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
+{
+ vq->user_be = true;
+}
+
+static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
+{
+ vq->user_be = false;
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ struct vhost_vring_state s;
+
+ if (vq->private_data)
+ return -EBUSY;
+
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+
+ if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+ s.num != VHOST_VRING_BIG_ENDIAN)
+ return -EINVAL;
+
+ if (s.num == VHOST_VRING_BIG_ENDIAN)
+ vhost_enable_cross_endian_big(vq);
+ else
+ vhost_enable_cross_endian_little(vq);
+
+ return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ struct vhost_vring_state s = {
+ .index = idx,
+ .num = vq->user_be
+ };
+
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ /* Note for legacy virtio: user_be is initialized at reset time
+ * according to the host endianness. If userspace does not set an
+ * explicit endianness, the default behavior is native endian, as
+ * expected by legacy virtio.
+ */
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
+static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+{
+ vhost_init_is_le(vq);
+}
+
+struct vhost_flush_struct {
+ struct vhost_work work;
+ struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+ struct vhost_flush_struct *s;
+
+ s = container_of(work, struct vhost_flush_struct, work);
+ complete(&s->wait_event);
+}
+
+static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct vhost_poll *poll;
+
+ poll = container_of(pt, struct vhost_poll, table);
+ poll->wqh = wqh;
+ add_wait_queue(wqh, &poll->wait);
+}
+
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+ struct vhost_work *work = &poll->work;
+
+ if (!(key_to_poll(key) & poll->mask))
+ return 0;
+
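+ /* Devices without a worker thread (e.g. vhost-vdpa) run the
+ * work inline in the wakeup path instead of queueing it.
+ */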
+ if (!poll->dev->use_worker)
+ work->fn(work);
+ else
+ vhost_poll_queue(poll);
+
+ return 0;
+}
+
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
+ work->fn = fn;
+}
+EXPORT_SYMBOL_GPL(vhost_work_init);
+
+/* Init poll structure */
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+ __poll_t mask, struct vhost_dev *dev)
+{
+ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
+ init_poll_funcptr(&poll->table, vhost_poll_func);
+ poll->mask = mask;
+ poll->dev = dev;
+ poll->wqh = NULL;
+
+ vhost_work_init(&poll->work, fn);
+}
+EXPORT_SYMBOL_GPL(vhost_poll_init);
+
+/* Start polling a file. We add ourselves to file's wait queue. The caller must
+ * keep a reference to a file until after vhost_poll_stop is called. */
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
+{
+ __poll_t mask;
+
+ if (poll->wqh)
+ return 0;
+
+ mask = vfs_poll(file, &poll->table);
+ if (mask)
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
+ if (mask & EPOLLERR) {
+ vhost_poll_stop(poll);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_poll_start);
+
+/* Stop polling a file. After this function returns, it becomes safe to drop the
+ * file reference. You must also flush afterwards. */
+void vhost_poll_stop(struct vhost_poll *poll)
+{
+ if (poll->wqh) {
+ remove_wait_queue(poll->wqh, &poll->wait);
+ poll->wqh = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_poll_stop);
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+ struct vhost_flush_struct flush;
+
+ if (dev->worker) {
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_work_queue(dev, &flush.work);
+ wait_for_completion(&flush.wait_event);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+{
+ if (!dev->worker)
+ return;
+
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ * test_and_set_bit() implies a memory barrier.
+ */
+ llist_add(&work->node, &dev->work_list);
+ wake_up_process(dev->worker);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_work_queue);
+
+/* A lockless hint for busy polling code to exit the loop */
+bool vhost_has_work(struct vhost_dev *dev)
+{
+ return !llist_empty(&dev->work_list);
+}
+EXPORT_SYMBOL_GPL(vhost_has_work);
+
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+EXPORT_SYMBOL_GPL(vhost_poll_queue);
+
+static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
+{
+ int j;
+
+ for (j = 0; j < VHOST_NUM_ADDRS; j++)
+ vq->meta_iotlb[j] = NULL;
+}
+
+static void vhost_vq_meta_reset(struct vhost_dev *d)
+{
+ int i;
+
+ for (i = 0; i < d->nvqs; ++i)
+ __vhost_vq_meta_reset(d->vqs[i]);
+}
+
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+ call_ctx->ctx = NULL;
+ memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+}
+
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
+{
+ return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
+
+static void vhost_vq_reset(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
+{
+ vq->num = 1;
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
+ vq->last_avail_idx = 0;
+ vq->avail_idx = 0;
+ vq->last_used_idx = 0;
+ vq->signalled_used = 0;
+ vq->signalled_used_valid = false;
+ vq->used_flags = 0;
+ vq->log_used = false;
+ vq->log_addr = -1ull;
+ vq->private_data = NULL;
+ vq->acked_features = 0;
+ vq->acked_backend_features = 0;
+ vq->log_base = NULL;
+ vq->error_ctx = NULL;
+ vq->kick = NULL;
+ vq->log_ctx = NULL;
+ vhost_disable_cross_endian(vq);
+ vhost_reset_is_le(vq);
+ vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
+ vhost_vring_call_reset(&vq->call_ctx);
+ __vhost_vq_meta_reset(vq);
+}
+
+static int vhost_worker(void *data)
+{
+ struct vhost_dev *dev = data;
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
+
+ kthread_use_mm(dev->mm);
+
+ for (;;) {
+ /* mb paired w/ kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+
+ node = llist_del_all(&dev->work_list);
+ if (!node)
+ schedule();
+
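+ /* llist_add() pushes at the head, so reverse the list to run
+ * the queued works in submission (FIFO) order.
+ */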
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
+ __set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(dev->kcov_handle);
+ work->fn(work);
+ kcov_remote_stop();
+ if (need_resched())
+ schedule();
+ }
+ }
+ kthread_unuse_mm(dev->mm);
+ return 0;
+}
+
+static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
+{
+ kfree(vq->indirect);
+ vq->indirect = NULL;
+ kfree(vq->log);
+ vq->log = NULL;
+ kfree(vq->heads);
+ vq->heads = NULL;
+}
+
+/* Helper to allocate iovec buffers for all vqs. */
+static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ vq->indirect = kmalloc_array(UIO_MAXIOV,
+ sizeof(*vq->indirect),
+ GFP_KERNEL);
+ vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
+ GFP_KERNEL);
+ vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
+ GFP_KERNEL);
+ if (!vq->indirect || !vq->log || !vq->heads)
+ goto err_nomem;
+ }
+ return 0;
+
+err_nomem:
+ for (; i >= 0; --i)
+ vhost_vq_free_iovecs(dev->vqs[i]);
+ return -ENOMEM;
+}
+
+static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nvqs; ++i)
+ vhost_vq_free_iovecs(dev->vqs[i]);
+}
+
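+/* Returns true when the current round of work has hit the device's
+ * packet or byte weight limit; the virtqueue poll is requeued so the
+ * remaining work is handled in a later worker iteration.
+ */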
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
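+/* Size of the avail ring in bytes, including the trailing 16-bit
+ * used_event field when VIRTIO_RING_F_EVENT_IDX is negotiated.
+ */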
+static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ size_t event __maybe_unused =
+ vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ return sizeof(*vq->avail) +
+ sizeof(*vq->avail->ring) * num + event;
+}
+
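+/* Size of the used ring in bytes, including the trailing 16-bit
+ * avail_event field when VIRTIO_RING_F_EVENT_IDX is negotiated.
+ */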
+static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ size_t event __maybe_unused =
+ vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ return sizeof(*vq->used) +
+ sizeof(*vq->used->ring) * num + event;
+}
+
+static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ return sizeof(*vq->desc) * num;
+}
+
+void vhost_dev_init(struct vhost_dev *dev,
+ struct vhost_virtqueue **vqs, int nvqs,
+ int iov_limit, int weight, int byte_weight,
+ bool use_worker,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg))
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ dev->vqs = vqs;
+ dev->nvqs = nvqs;
+ mutex_init(&dev->mutex);
+ dev->log_ctx = NULL;
+ dev->umem = NULL;
+ dev->iotlb = NULL;
+ dev->mm = NULL;
+ dev->worker = NULL;
+ dev->iov_limit = iov_limit;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
+ dev->use_worker = use_worker;
+ dev->msg_handler = msg_handler;
+ init_llist_head(&dev->work_list);
+ init_waitqueue_head(&dev->wait);
+ INIT_LIST_HEAD(&dev->read_list);
+ INIT_LIST_HEAD(&dev->pending_list);
+ spin_lock_init(&dev->iotlb_lock);
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ vq->log = NULL;
+ vq->indirect = NULL;
+ vq->heads = NULL;
+ vq->dev = dev;
+ mutex_init(&vq->mutex);
+ vhost_vq_reset(dev, vq);
+ if (vq->handle_kick)
+ vhost_poll_init(&vq->poll, vq->handle_kick,
+ EPOLLIN, dev);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_dev_init);
+
+/* Caller should have device mutex */
+long vhost_dev_check_owner(struct vhost_dev *dev)
+{
+ /* Are you the owner? If not, I don't think you mean to do that */
+ return dev->mm == current->mm ? 0 : -EPERM;
+}
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
+
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
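+/* Queue a work item so the worker thread attaches itself to the
+ * cgroups of the owner task, then wait for it to complete.
+ */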
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_dev_flush(dev);
+ return attach.ret;
+}
+
+/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+ return dev->mm;
+}
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
+
+static void vhost_attach_mm(struct vhost_dev *dev)
+{
+ /* No owner, become one */
+ if (dev->use_worker) {
+ dev->mm = get_task_mm(current);
+ } else {
+ /* A vDPA device does not use a worker thread, so there's
+ * no need to hold the address space of the mm. This helps
+ * to avoid a deadlock in the case of mmap(), which may
+ * hold the refcount of the file and depend on the release
+ * method to remove the vma.
+ */
+ dev->mm = current->mm;
+ mmgrab(dev->mm);
+ }
+}
+
+static void vhost_detach_mm(struct vhost_dev *dev)
+{
+ if (!dev->mm)
+ return;
+
+ if (dev->use_worker)
+ mmput(dev->mm);
+ else
+ mmdrop(dev->mm);
+
+ dev->mm = NULL;
+}
+
+/* Caller should have device mutex */
+long vhost_dev_set_owner(struct vhost_dev *dev)
+{
+ struct task_struct *worker;
+ int err;
+
+ /* Is there an owner already? */
+ if (vhost_dev_has_owner(dev)) {
+ err = -EBUSY;
+ goto err_mm;
+ }
+
+ vhost_attach_mm(dev);
+
+ dev->kcov_handle = kcov_common_handle();
+ if (dev->use_worker) {
+ worker = kthread_create(vhost_worker, dev,
+ "vhost-%d", current->pid);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ goto err_worker;
+ }
+
+ dev->worker = worker;
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
+ if (err)
+ goto err_cgroup;
+ }
+
+ err = vhost_dev_alloc_iovecs(dev);
+ if (err)
+ goto err_cgroup;
+
+ return 0;
+err_cgroup:
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ }
+err_worker:
+ vhost_detach_mm(dev);
+ dev->kcov_handle = 0;
+err_mm:
+ return err;
+}
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
+
+static struct vhost_iotlb *iotlb_alloc(void)
+{
+ return vhost_iotlb_alloc(max_iotlb_entries,
+ VHOST_IOTLB_FLAG_RETIRE);
+}
+
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
+{
+ return iotlb_alloc();
+}
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
+
+/* Caller should have device mutex */
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
+{
+ int i;
+
+ vhost_dev_cleanup(dev);
+
+ dev->umem = umem;
+ /* We don't need VQ locks below since vhost_dev_cleanup makes sure
+ * VQs aren't running.
+ */
+ for (i = 0; i < dev->nvqs; ++i)
+ dev->vqs[i]->umem = umem;
+}
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
+
+void vhost_dev_stop(struct vhost_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
+ vhost_poll_stop(&dev->vqs[i]->poll);
+ }
+
+ vhost_dev_flush(dev);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_stop);
+
+void vhost_clear_msg(struct vhost_dev *dev)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&dev->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &dev->read_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ list_for_each_entry_safe(node, n, &dev->pending_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ spin_unlock(&dev->iotlb_lock);
+}
+EXPORT_SYMBOL_GPL(vhost_clear_msg);
+
+void vhost_dev_cleanup(struct vhost_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ if (dev->vqs[i]->error_ctx)
+ eventfd_ctx_put(dev->vqs[i]->error_ctx);
+ if (dev->vqs[i]->kick)
+ fput(dev->vqs[i]->kick);
+ if (dev->vqs[i]->call_ctx.ctx)
+ eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
+ vhost_vq_reset(dev, dev->vqs[i]);
+ }
+ vhost_dev_free_iovecs(dev);
+ if (dev->log_ctx)
+ eventfd_ctx_put(dev->log_ctx);
+ dev->log_ctx = NULL;
+ /* No one will access memory at this point */
+ vhost_iotlb_free(dev->umem);
+ dev->umem = NULL;
+ vhost_iotlb_free(dev->iotlb);
+ dev->iotlb = NULL;
+ vhost_clear_msg(dev);
+ wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
+ WARN_ON(!llist_empty(&dev->work_list));
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ dev->kcov_handle = 0;
+ }
+ vhost_detach_mm(dev);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
+
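+/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE
+ * page, so the byte covering @addr lives at offset
+ * addr / VHOST_PAGE_SIZE / 8 from @log_base.
+ */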
+static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+{
+ u64 a = addr / VHOST_PAGE_SIZE / 8;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (a > ULONG_MAX - (unsigned long)log_base ||
+ a + (unsigned long)log_base > ULONG_MAX)
+ return false;
+
+ return access_ok(log_base + a,
+ (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
+}
+
+/* Make sure 64 bit math will not overflow. */
+static bool vhost_overflow(u64 uaddr, u64 size)
+{
+ if (uaddr > ULONG_MAX || size > ULONG_MAX)
+ return true;
+
+ if (!size)
+ return false;
+
+ return uaddr > ULONG_MAX - size + 1;
+}
+
+/* Caller should have vq mutex and device mutex. */
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
+ int log_all)
+{
+ struct vhost_iotlb_map *map;
+
+ if (!umem)
+ return false;
+
+ list_for_each_entry(map, &umem->list, link) {
+ unsigned long a = map->addr;
+
+ if (vhost_overflow(map->addr, map->size))
+ return false;
+
+ if (!access_ok((void __user *)a, map->size))
+ return false;
+ else if (log_all && !log_access_ok(log_base,
+ map->start,
+ map->size))
+ return false;
+ }
+ return true;
+}
+
+static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
+ u64 addr, unsigned int size,
+ int type)
+{
+ const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
+
+ if (!map)
+ return NULL;
+
+ return (void __user *)(uintptr_t)(map->addr + addr - map->start);
+}
+
+/* Can we switch to this memory table? */
+/* Caller should have device mutex but not vq mutex */
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
+ int log_all)
+{
+ int i;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ bool ok;
+ bool log;
+
+ mutex_lock(&d->vqs[i]->mutex);
+ log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
+ /* If ring is inactive, will check when it's enabled. */
+ if (d->vqs[i]->private_data)
+ ok = vq_memory_access_ok(d->vqs[i]->log_base,
+ umem, log);
+ else
+ ok = true;
+ mutex_unlock(&d->vqs[i]->mutex);
+ if (!ok)
+ return false;
+ }
+ return true;
+}
+
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access);
+
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
+ const void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_to_user(to, from, size);
+ else {
+ /* This function should be called after an iotlb
+ * prefetch, which means we're sure that the whole vq
+ * can be accessed through the iotlb, so -EAGAIN should
+ * not happen in this case.
+ */
+ struct iov_iter t;
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)to, size,
+ VHOST_ADDR_USED);
+
+ if (uaddr)
+ return __copy_to_user(uaddr, from, size);
+
+ ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_WO);
+ if (ret < 0)
+ goto out;
+ iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
+ ret = copy_to_iter(from, size, &t);
+ if (ret == size)
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ void __user *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_from_user(to, from, size);
+ else {
+ /* This function should be called after an iotlb
+ * prefetch, which means we're sure that the vq
+ * can be accessed through the iotlb, so -EAGAIN should
+ * not happen in this case.
+ */
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)from, size,
+ VHOST_ADDR_DESC);
+ struct iov_iter f;
+
+ if (uaddr)
+ return __copy_from_user(to, uaddr, size);
+
+ ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", from,
+ (unsigned long long) size);
+ goto out;
+ }
+ iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
+ ret = copy_from_iter(to, size, &f);
+ if (ret == size)
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
+ void __user *addr, unsigned int size,
+ int type)
+{
+ int ret;
+
+ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
+ vq_err(vq, "Non atomic userspace memory access: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ return vq->iotlb_iov[0].iov_base;
+}
+
+/* This function should be called after an iotlb
+ * prefetch, which means we're sure that the vq
+ * can be accessed through the iotlb, so -EAGAIN should
+ * not happen in this case.
+ */
+static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
+ void __user *addr, unsigned int size,
+ int type)
+{
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)addr, size, type);
+ if (uaddr)
+ return uaddr;
+
+ return __vhost_get_user_slow(vq, addr, size, type);
+}
+
+#define vhost_put_user(vq, x, ptr) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __put_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) to = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
+ sizeof(*ptr), VHOST_ADDR_USED); \
+ if (to != NULL) \
+ ret = __put_user(x, to); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq));
+}
+
+static inline int vhost_put_used(struct vhost_virtqueue *vq,
+ struct vring_used_elem *head, int idx,
+ int count)
+{
+ return vhost_copy_to_user(vq, vq->used->ring + idx, head,
+ count * sizeof(*head));
+}
+
+static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+ &vq->used->flags);
+}
+
+static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+ &vq->used->idx);
+}
+
+#define vhost_get_user(vq, x, ptr, type) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __get_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) from = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
+ sizeof(*ptr), \
+ type); \
+ if (from != NULL) \
+ ret = __get_user(x, from); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+#define vhost_get_avail(vq, x, ptr) \
+ vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
+
+#define vhost_get_used(vq, x, ptr) \
+ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
+
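+/* Take every vq mutex; distinct lockdep subclasses (one per vq) avoid
+ * false positives when the mutexes are held at the same time.
+ */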
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
+ __virtio16 *idx)
+{
+ return vhost_get_avail(vq, *idx, &vq->avail->idx);
+}
+
+static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
+ __virtio16 *head, int idx)
+{
+ return vhost_get_avail(vq, *head,
+ &vq->avail->ring[idx & (vq->num - 1)]);
+}
+
+static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
+ __virtio16 *flags)
+{
+ return vhost_get_avail(vq, *flags, &vq->avail->flags);
+}
+
+static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
+ __virtio16 *event)
+{
+ return vhost_get_avail(vq, *event, vhost_used_event(vq));
+}
+
+static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
+ __virtio16 *idx)
+{
+ return vhost_get_used(vq, *idx, &vq->used->idx);
+}
+
+static inline int vhost_get_desc(struct vhost_virtqueue *vq,
+ struct vring_desc *desc, int idx)
+{
+ return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
+}
+
+static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&d->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+ kfree(node);
+ }
+ }
+
+ spin_unlock(&d->iotlb_lock);
+}
+
+static bool umem_access_ok(u64 uaddr, u64 size, int access)
+{
+ unsigned long a = uaddr;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (vhost_overflow(uaddr, size))
+ return false;
+
+ if ((access & VHOST_ACCESS_RO) &&
+ !access_ok((void __user *)a, size))
+ return false;
+ if ((access & VHOST_ACCESS_WO) &&
+ !access_ok((void __user *)a, size))
+ return false;
+ return true;
+}
+
+static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg)
+{
+ int ret = 0;
+
+ if (asid != 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ ret = -EFAULT;
+ break;
+ }
+ vhost_vq_meta_reset(dev);
+ if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
+ ret = -ENOMEM;
+ break;
+ }
+ vhost_iotlb_notify_vq(dev, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ vhost_vq_meta_reset(dev);
+ vhost_iotlb_del_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ vhost_dev_unlock_vqs(dev);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from)
+{
+ struct vhost_iotlb_msg msg;
+ size_t offset;
+ int type, ret;
+ u32 asid = 0;
+
+ ret = copy_from_iter(&type, sizeof(type), from);
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (type) {
+ case VHOST_IOTLB_MSG:
+ /* There may be a hole after the type field for the V1 message
+ * format, so skip it here.
+ */
+ offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ if (vhost_backend_has_feature(dev->vqs[0],
+ VHOST_BACKEND_F_IOTLB_ASID)) {
+ ret = copy_from_iter(&asid, sizeof(asid), from);
+ if (ret != sizeof(asid)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ offset = 0;
+ } else
+ offset = sizeof(__u32);
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ iov_iter_advance(from, offset);
+ ret = copy_from_iter(&msg, sizeof(msg), from);
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (dev->msg_handler)
+ ret = dev->msg_handler(dev, asid, &msg);
+ else
+ ret = vhost_process_iotlb_msg(dev, asid, &msg);
+ if (ret) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
+ sizeof(struct vhost_msg_v2);
+done:
+ return ret;
+}
+EXPORT_SYMBOL(vhost_chr_write_iter);
+
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait)
+{
+ __poll_t mask = 0;
+
+ poll_wait(file, &dev->wait, wait);
+
+ if (!list_empty(&dev->read_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+EXPORT_SYMBOL(vhost_chr_poll);
+
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct vhost_msg_node *node;
+ ssize_t ret = 0;
+ unsigned size = sizeof(struct vhost_msg);
+
+ if (iov_iter_count(to) < size)
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(&dev->wait, &wait,
+ TASK_INTERRUPTIBLE);
+
+ node = vhost_dequeue_msg(dev, &dev->read_list);
+ if (node)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ if (!dev->iotlb) {
+ ret = -EBADFD;
+ break;
+ }
+
+ schedule();
+ }
+
+ if (!noblock)
+ finish_wait(&dev->wait, &wait);
+
+ if (node) {
+ struct vhost_iotlb_msg *msg;
+ void *start = &node->msg;
+
+ switch (node->msg.type) {
+ case VHOST_IOTLB_MSG:
+ size = sizeof(node->msg);
+ msg = &node->msg.iotlb;
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ size = sizeof(node->msg_v2);
+ msg = &node->msg_v2.iotlb;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ ret = copy_to_iter(start, size, to);
+ if (ret != size || msg->type != VHOST_IOTLB_MISS) {
+ kfree(node);
+ return ret;
+ }
+ vhost_enqueue_msg(dev, &dev->pending_list, node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
+
+static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
+{
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_msg_node *node;
+ struct vhost_iotlb_msg *msg;
+ bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
+
+ node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
+ if (!node)
+ return -ENOMEM;
+
+ if (v2) {
+ node->msg_v2.type = VHOST_IOTLB_MSG_V2;
+ msg = &node->msg_v2.iotlb;
+ } else {
+ msg = &node->msg.iotlb;
+ }
+
+ msg->type = VHOST_IOTLB_MISS;
+ msg->iova = iova;
+ msg->perm = access;
+
+ vhost_enqueue_msg(dev, &dev->read_list, node);
+
+ return 0;
+}
+
+static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
+{
+ /* If an IOTLB device is present, the vring addresses are
+ * GIOVAs. Access validation occurs at prefetch time. */
+ if (vq->iotlb)
+ return true;
+
+ return access_ok(desc, vhost_get_desc_size(vq, num)) &&
+ access_ok(avail, vhost_get_avail_size(vq, num)) &&
+ access_ok(used, vhost_get_used_size(vq, num));
+}
+
+static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
+ const struct vhost_iotlb_map *map,
+ int type)
+{
+ int access = (type == VHOST_ADDR_USED) ?
+ VHOST_ACCESS_WO : VHOST_ACCESS_RO;
+
+ if (likely(map->perm & access))
+ vq->meta_iotlb[type] = map;
+}
+
+static bool iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len, int type)
+{
+ const struct vhost_iotlb_map *map;
+ struct vhost_iotlb *umem = vq->iotlb;
+ u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
+
+ if (vhost_vq_meta_fetch(vq, addr, len, type))
+ return true;
+
+ while (len > s) {
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
+ vhost_iotlb_miss(vq, addr, access);
+ return false;
+ } else if (!(map->perm & access)) {
+ /* Report the possible access violation by
+ * requesting another translation from userspace.
+ */
+ return false;
+ }
+
+ size = map->size - addr + map->start;
+
+ if (orig_addr == addr && size >= len)
+ vhost_vq_meta_update(vq, map, type);
+
+ s += size;
+ addr += size;
+ }
+
+ return true;
+}
+
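+/* Pre-translate the desc, avail and used rings and cache the mappings
+ * in meta_iotlb so the fast paths can access them directly. Returns 0
+ * if any of them cannot be translated yet (an IOTLB miss may be queued
+ * for userspace), nonzero otherwise.
+ */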
+int vq_meta_prefetch(struct vhost_virtqueue *vq)
+{
+ unsigned int num = vq->num;
+
+ if (!vq->iotlb)
+ return 1;
+
+ return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
+ vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
+ iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
+ vhost_get_avail_size(vq, num),
+ VHOST_ADDR_AVAIL) &&
+ iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
+ vhost_get_used_size(vq, num), VHOST_ADDR_USED);
+}
+EXPORT_SYMBOL_GPL(vq_meta_prefetch);
+
+/* Can we log writes? */
+/* Caller should have device mutex but not vq mutex */
+bool vhost_log_access_ok(struct vhost_dev *dev)
+{
+ return memory_access_ok(dev, dev->umem, 1);
+}
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);
+
+static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
+ void __user *log_base,
+ bool log_used,
+ u64 log_addr)
+{
+ /* If an IOTLB device is present, log_addr is a GIOVA that
+ * will never be logged by log_used(). */
+ if (vq->iotlb)
+ return true;
+
+ return !log_used || log_access_ok(log_base, log_addr,
+ vhost_get_used_size(vq, vq->num));
+}
+
+/* Verify access for write logging. */
+/* Caller should have vq mutex and device mutex */
+static bool vq_log_access_ok(struct vhost_virtqueue *vq,
+ void __user *log_base)
+{
+ return vq_memory_access_ok(log_base, vq->umem,
+ vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
+ vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
+}
+
+/* Can we start vq? */
+/* Caller should have vq mutex and device mutex */
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
+{
+ if (!vq_log_access_ok(vq, vq->log_base))
+ return false;
+
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+
+static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
+{
+ struct vhost_memory mem, *newmem;
+ struct vhost_memory_region *region;
+ struct vhost_iotlb *newumem, *oldumem;
+ unsigned long size = offsetof(struct vhost_memory, regions);
+ int i;
+
+ if (copy_from_user(&mem, m, size))
+ return -EFAULT;
+ if (mem.padding)
+ return -EOPNOTSUPP;
+ if (mem.nregions > max_mem_regions)
+ return -E2BIG;
+ newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
+ GFP_KERNEL);
+ if (!newmem)
+ return -ENOMEM;
+
+ memcpy(newmem, &mem, size);
+ if (copy_from_user(newmem->regions, m->regions,
+ flex_array_size(newmem, regions, mem.nregions))) {
+ kvfree(newmem);
+ return -EFAULT;
+ }
+
+ newumem = iotlb_alloc();
+ if (!newumem) {
+ kvfree(newmem);
+ return -ENOMEM;
+ }
+
+ for (region = newmem->regions;
+ region < newmem->regions + mem.nregions;
+ region++) {
+ if (vhost_iotlb_add_range(newumem,
+ region->guest_phys_addr,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_MAP_RW))
+ goto err;
+ }
+
+ if (!memory_access_ok(d, newumem, 0))
+ goto err;
+
+ oldumem = d->umem;
+ d->umem = newumem;
+
+ /* All memory accesses are done under some VQ mutex. */
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->umem = newumem;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+
+ kvfree(newmem);
+ vhost_iotlb_free(oldumem);
+ return 0;
+
+err:
+ vhost_iotlb_free(newumem);
+ kvfree(newmem);
+ return -EFAULT;
+}
+
+static long vhost_vring_set_num(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ void __user *argp)
+{
+ struct vhost_vring_state s;
+
+ /* Resizing ring with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data)
+ return -EBUSY;
+
+ if (copy_from_user(&s, argp, sizeof s))
+ return -EFAULT;
+
+ if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
+ return -EINVAL;
+ vq->num = s.num;
+
+ return 0;
+}
+
+static long vhost_vring_set_addr(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ void __user *argp)
+{
+ struct vhost_vring_addr a;
+
+ if (copy_from_user(&a, argp, sizeof a))
+ return -EFAULT;
+ if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
+ return -EOPNOTSUPP;
+
+ /* For 32bit, verify that the top 32bits of the user
+ data are set to zero. */
+ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
+ (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
+ (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
+ return -EFAULT;
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
+ return -EINVAL;
+
+ /* We only verify access here if backend is configured.
+ * If it is not, we don't as size might not have been setup.
+ * We will verify when backend is configured. */
+ if (vq->private_data) {
+ if (!vq_access_ok(vq, vq->num,
+ (void __user *)(unsigned long)a.desc_user_addr,
+ (void __user *)(unsigned long)a.avail_user_addr,
+ (void __user *)(unsigned long)a.used_user_addr))
+ return -EINVAL;
+
+ /* Also validate log access for used ring if enabled. */
+ if (!vq_log_used_access_ok(vq, vq->log_base,
+ a.flags & (0x1 << VHOST_VRING_F_LOG),
+ a.log_guest_addr))
+ return -EINVAL;
+ }
+
+ vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
+ vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
+ vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
+ vq->log_addr = a.log_guest_addr;
+ vq->used = (void __user *)(unsigned long)a.used_user_addr;
+
+ return 0;
+}
+
+static long vhost_vring_set_num_addr(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ unsigned int ioctl,
+ void __user *argp)
+{
+ long r;
+
+ mutex_lock(&vq->mutex);
+
+ switch (ioctl) {
+ case VHOST_SET_VRING_NUM:
+ r = vhost_vring_set_num(d, vq, argp);
+ break;
+ case VHOST_SET_VRING_ADDR:
+ r = vhost_vring_set_addr(d, vq, argp);
+ break;
+ default:
+ BUG();
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ return r;
+}
+
+long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+{
+ struct file *eventfp, *filep = NULL;
+ bool pollstart = false, pollstop = false;
+ struct eventfd_ctx *ctx = NULL;
+ u32 __user *idxp = argp;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ struct vhost_vring_file f;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, idxp);
+ if (r < 0)
+ return r;
+ if (idx >= d->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, d->nvqs);
+ vq = d->vqs[idx];
+
+ if (ioctl == VHOST_SET_VRING_NUM ||
+ ioctl == VHOST_SET_VRING_ADDR) {
+ return vhost_vring_set_num_addr(d, vq, ioctl, argp);
+ }
+
+ mutex_lock(&vq->mutex);
+
+ switch (ioctl) {
+ case VHOST_SET_VRING_BASE:
+ /* Moving base with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data) {
+ r = -EBUSY;
+ break;
+ }
+ if (copy_from_user(&s, argp, sizeof s)) {
+ r = -EFAULT;
+ break;
+ }
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = s.num & 0xffff;
+ vq->last_used_idx = (s.num >> 16) & 0xffff;
+ } else {
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->last_avail_idx = s.num;
+ }
+ /* Forget the cached index value. */
+ vq->avail_idx = vq->last_avail_idx;
+ break;
+ case VHOST_GET_VRING_BASE:
+ s.index = idx;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+ s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
+ else
+ s.num = vq->last_avail_idx;
+ if (copy_to_user(argp, &s, sizeof s))
+ r = -EFAULT;
+ break;
+ case VHOST_SET_VRING_KICK:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != vq->kick) {
+ pollstop = (filep = vq->kick) != NULL;
+ pollstart = (vq->kick = eventfp) != NULL;
+ } else
+ filep = eventfp;
+ break;
+ case VHOST_SET_VRING_CALL:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
+ break;
+ }
+
+ swap(ctx, vq->call_ctx.ctx);
+ break;
+ case VHOST_SET_VRING_ERR:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
+ break;
+ }
+ swap(ctx, vq->error_ctx);
+ break;
+ case VHOST_SET_VRING_ENDIAN:
+ r = vhost_set_vring_endian(vq, argp);
+ break;
+ case VHOST_GET_VRING_ENDIAN:
+ r = vhost_get_vring_endian(vq, idx, argp);
+ break;
+ case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
+ if (copy_from_user(&s, argp, sizeof(s))) {
+ r = -EFAULT;
+ break;
+ }
+ vq->busyloop_timeout = s.num;
+ break;
+ case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
+ s.index = idx;
+ s.num = vq->busyloop_timeout;
+ if (copy_to_user(argp, &s, sizeof(s)))
+ r = -EFAULT;
+ break;
+ default:
+ r = -ENOIOCTLCMD;
+ }
+
+ if (pollstop && vq->handle_kick)
+ vhost_poll_stop(&vq->poll);
+
+ if (!IS_ERR_OR_NULL(ctx))
+ eventfd_ctx_put(ctx);
+ if (filep)
+ fput(filep);
+
+ if (pollstart && vq->handle_kick)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+
+ mutex_unlock(&vq->mutex);
+
+ if (pollstop && vq->handle_kick)
+ vhost_dev_flush(vq->poll.dev);
+ return r;
+}
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
+
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
+{
+ struct vhost_iotlb *niotlb, *oiotlb;
+ int i;
+
+ niotlb = iotlb_alloc();
+ if (!niotlb)
+ return -ENOMEM;
+
+ oiotlb = d->iotlb;
+ d->iotlb = niotlb;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->iotlb = niotlb;
+ __vhost_vq_meta_reset(vq);
+ mutex_unlock(&vq->mutex);
+ }
+
+ vhost_iotlb_free(oiotlb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
+
+/* Caller must have device mutex */
+long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+{
+ struct eventfd_ctx *ctx;
+ u64 p;
+ long r;
+ int i, fd;
+
+ /* If you are not the owner, you can become one */
+ if (ioctl == VHOST_SET_OWNER) {
+ r = vhost_dev_set_owner(d);
+ goto done;
+ }
+
+ /* You must be the owner to do anything else */
+ r = vhost_dev_check_owner(d);
+ if (r)
+ goto done;
+
+ switch (ioctl) {
+ case VHOST_SET_MEM_TABLE:
+ r = vhost_set_memory(d, argp);
+ break;
+ case VHOST_SET_LOG_BASE:
+ if (copy_from_user(&p, argp, sizeof p)) {
+ r = -EFAULT;
+ break;
+ }
+ if ((u64)(unsigned long)p != p) {
+ r = -EFAULT;
+ break;
+ }
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq;
+ void __user *base = (void __user *)(unsigned long)p;
+ vq = d->vqs[i];
+ mutex_lock(&vq->mutex);
+ /* If ring is inactive, will check when it's enabled. */
+ if (vq->private_data && !vq_log_access_ok(vq, base))
+ r = -EFAULT;
+ else
+ vq->log_base = base;
+ mutex_unlock(&vq->mutex);
+ }
+ break;
+ case VHOST_SET_LOG_FD:
+ r = get_user(fd, (int __user *)argp);
+ if (r < 0)
+ break;
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
+ break;
+ }
+ swap(ctx, d->log_ctx);
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->log_ctx = d->log_ctx;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+ if (ctx)
+ eventfd_ctx_put(ctx);
+ break;
+ default:
+ r = -ENOIOCTLCMD;
+ break;
+ }
+done:
+ return r;
+}
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
+
+/* TODO: This is really inefficient. We need something like get_user()
+ * (instruction directly accesses the data, with an exception table entry
+ * returning -EFAULT). See Documentation/x86/exception-tables.rst.
+ */
+static int set_bit_to_user(int nr, void __user *addr)
+{
+ unsigned long log = (unsigned long)addr;
+ struct page *page;
+ void *base;
+ int bit = nr + (log % PAGE_SIZE) * 8;
+ int r;
+
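+ /* Pin the single page that contains the target byte, set the bit
+ * through a temporary kernel mapping, then mark the page dirty.
+ */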
+ r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
+ if (r < 0)
+ return r;
+ BUG_ON(r != 1);
+ base = kmap_atomic(page);
+ set_bit(bit, base);
+ kunmap_atomic(base);
+ unpin_user_pages_dirty_lock(&page, 1, true);
+ return 0;
+}
+
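+/* Mark the pages covered by a write of @write_length bytes at
+ * @write_address as dirty in the userspace log bitmap, one bit per
+ * VHOST_PAGE_SIZE page.
+ */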
+static int log_write(void __user *log_base,
+ u64 write_address, u64 write_length)
+{
+ u64 write_page = write_address / VHOST_PAGE_SIZE;
+ int r;
+
+ if (!write_length)
+ return 0;
+ write_length += write_address % VHOST_PAGE_SIZE;
+ for (;;) {
+ u64 base = (u64)(unsigned long)log_base;
+ u64 log = base + write_page / 8;
+ int bit = write_page % 8;
+ if ((u64)(unsigned long)log != log)
+ return -EFAULT;
+ r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
+ if (r < 0)
+ return r;
+ if (write_length <= VHOST_PAGE_SIZE)
+ break;
+ write_length -= VHOST_PAGE_SIZE;
+ write_page += 1;
+ }
+ return r;
+}
+
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+ struct vhost_iotlb *umem = vq->umem;
+ struct vhost_iotlb_map *u;
+ u64 start, end, l, min;
+ int r;
+ bool hit = false;
+
+ while (len) {
+ min = len;
+ /* More than one GPA can be mapped into a single HVA, so
+ * iterate over all possible umems here to be safe.
+ */
+ list_for_each_entry(u, &umem->list, link) {
+ if (u->addr > hva - 1 + len ||
+ u->addr - 1 + u->size < hva)
+ continue;
+ start = max(u->addr, hva);
+ end = min(u->addr - 1 + u->size, hva - 1 + len);
+ l = end - start + 1;
+ r = log_write(vq->log_base,
+ u->start + start - u->addr,
+ l);
+ if (r < 0)
+ return r;
+ hit = true;
+ min = min(l, min);
+ }
+
+ if (!hit)
+ return -EFAULT;
+
+ len -= min;
+ hva += min;
+ }
+
+ return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+ struct iovec *iov = vq->log_iov;
+ int i, ret;
+
+ if (!vq->iotlb)
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
+{
+ int i, r;
+
+ /* Make sure data written is seen before log. */
+ smp_wmb();
+
+ if (vq->iotlb) {
+ for (i = 0; i < count; i++) {
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+ }
+
+ for (i = 0; i < log_num; ++i) {
+ u64 l = min(log[i].len, len);
+ r = log_write(vq->log_base, log[i].addr, l);
+ if (r < 0)
+ return r;
+ len -= l;
+ if (!len) {
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ return 0;
+ }
+ }
+ /* Length written exceeds what we have stored. This is a bug. */
+ BUG();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_log_write);
+
+static int vhost_update_used_flags(struct vhost_virtqueue *vq)
+{
+ void __user *used;
+ if (vhost_put_used_flags(vq))
+ return -EFAULT;
+ if (unlikely(vq->log_used)) {
+ /* Make sure the flag is seen before log. */
+ smp_wmb();
+ /* Log used flag write. */
+ used = &vq->used->flags;
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ return 0;
+}
+
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
+{
+ if (vhost_put_avail_event(vq))
+ return -EFAULT;
+ if (unlikely(vq->log_used)) {
+ void __user *used;
+ /* Make sure the event is seen before log. */
+ smp_wmb();
+ /* Log avail event write */
+ used = vhost_avail_event(vq);
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ return 0;
+}
+
+int vhost_vq_init_access(struct vhost_virtqueue *vq)
+{
+ __virtio16 last_used_idx;
+ int r;
+ bool is_le = vq->is_le;
+
+ if (!vq->private_data)
+ return 0;
+
+ vhost_init_is_le(vq);
+
+ r = vhost_update_used_flags(vq);
+ if (r)
+ goto err;
+ vq->signalled_used_valid = false;
+ if (!vq->iotlb &&
+ !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
+ r = -EFAULT;
+ goto err;
+ }
+ r = vhost_get_used_idx(vq, &last_used_idx);
+ if (r) {
+ vq_err(vq, "Can't access used idx at %p\n",
+ &vq->used->idx);
+ goto err;
+ }
+ vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
+ return 0;
+
+err:
+ vq->is_le = is_le;
+ return r;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_init_access);
+
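+/* Translate a ring address range into userspace iovecs using the IOTLB
+ * if one is configured, or the memory table otherwise. Returns the
+ * number of iovecs filled, or a negative errno; -EAGAIN means an IOTLB
+ * miss was reported and the caller should retry after it is serviced.
+ */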
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access)
+{
+ const struct vhost_iotlb_map *map;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
+ struct iovec *_iov;
+ u64 s = 0, last = addr + len - 1;
+ int ret = 0;
+
+ while ((u64)len > s) {
+ u64 size;
+ if (unlikely(ret >= iov_size)) {
+ ret = -ENOBUFS;
+ break;
+ }
+
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = -EAGAIN;
+ break;
+ } else if (!(map->perm & access)) {
+ ret = -EPERM;
+ break;
+ }
+
+ _iov = iov + ret;
+ size = map->size - addr + map->start;
+ _iov->iov_len = min((u64)len - s, size);
+ _iov->iov_base = (void __user *)(unsigned long)
+ (map->addr + addr - map->start);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ if (ret == -EAGAIN)
+ vhost_iotlb_miss(vq, addr, access);
+ return ret;
+}
+
+/* Each buffer in the virtqueues is actually a chain of descriptors. This
+ * function returns the next descriptor in the chain,
+ * or -1U if we're at the end. */
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
+{
+ unsigned int next;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
+ return -1U;
+
+ /* Check they're not leading us off the end of the descriptors. */
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
+ return next;
+}
+
+static int get_indirect(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ struct vring_desc *indirect)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ u32 len = vhost32_to_cpu(vq, indirect->len);
+ struct iov_iter from;
+ int ret, access;
+
+ /* Sanity check */
+ if (unlikely(len % sizeof desc)) {
+ vq_err(vq, "Invalid length in indirect descriptor: "
+ "len 0x%llx not multiple of 0x%zx\n",
+ (unsigned long long)len,
+ sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
+ UIO_MAXIOV, VHOST_ACCESS_RO);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ return ret;
+ }
+ iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
+ count = len / sizeof desc;
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (unlikely(count > USHRT_MAX + 1)) {
+ vq_err(vq, "Indirect buffer length too big: %d\n",
+ indirect->len);
+ return -E2BIG;
+ }
+
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (unlikely(++found > count)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "indirect size %u\n",
+ i, count);
+ return -EINVAL;
+ }
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
+ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
+ return -EINVAL;
+ }
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
+ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
+ return -EINVAL;
+ }
+
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
+ iov_size - iov_count, access);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
+ return ret;
+ }
+ /* If this is an input descriptor, increment that count. */
+ if (access == VHOST_ACCESS_WO) {
+ *in_num += ret;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (unlikely(*in_num)) {
+ vq_err(vq, "Indirect descriptor "
+ "has out after in: idx %d\n", i);
+ return -EINVAL;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(vq, &desc)) != -1);
+ return 0;
+}
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error. */
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ struct vring_desc desc;
+ unsigned int i, head, found = 0;
+ u16 last_avail_idx;
+ __virtio16 avail_idx;
+ __virtio16 ring_head;
+ int ret, access;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vq->last_avail_idx;
+
+ if (vq->avail_idx == vq->last_avail_idx) {
+ if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
+ vq_err(vq, "Failed to access avail idx at %p\n",
+ &vq->avail->idx);
+ return -EFAULT;
+ }
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+
+ if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
+ vq_err(vq, "Guest moved used index from %u to %u",
+ last_avail_idx, vq->avail_idx);
+ return -EFAULT;
+ }
+
+ /* If there's nothing new since last we looked, return
+ * invalid.
+ */
+ if (vq->avail_idx == last_avail_idx)
+ return vq->num;
+
+ /* Only get avail ring entries after they have been
+ * exposed by guest.
+ */
+ smp_rmb();
+ }
+
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return -EFAULT;
+ }
+
+ head = vhost16_to_cpu(vq, ring_head);
+
+ /* If their number is silly, that's an error. */
+ if (unlikely(head >= vq->num)) {
+ vq_err(vq, "Guest says index %u > %u is available",
+ head, vq->num);
+ return -EINVAL;
+ }
+
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ i = head;
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (unlikely(i >= vq->num)) {
+ vq_err(vq, "Desc index is %u > %u, head = %u",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ if (unlikely(++found > vq->num)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "vq size %u head %u\n",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ ret = vhost_get_desc(vq, &desc, i);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc + i);
+ return -EFAULT;
+ }
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
+ ret = get_indirect(vq, iov, iov_size,
+ out_num, in_num,
+ log, log_num, &desc);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
+ return ret;
+ }
+ continue;
+ }
+
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
+ iov_size - iov_count, access);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
+ return ret;
+ }
+ if (access == VHOST_ACCESS_WO) {
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (unlikely(*in_num)) {
+ vq_err(vq, "Descriptor has out after in: "
+ "idx %d\n", i);
+ return -EINVAL;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(vq, &desc)) != -1);
+
+ /* On success, increment avail index. */
+ vq->last_avail_idx++;
+
+ /* Assume notifications from the guest are disabled at this point;
+ * if they aren't, we would need to update the avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
+ return head;
+}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
+
+/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
+{
+ vq->last_avail_idx -= n;
+}
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
+{
+ struct vring_used_elem heads = {
+ cpu_to_vhost32(vq, head),
+ cpu_to_vhost32(vq, len)
+ };
+
+ return vhost_add_used_n(vq, &heads, 1);
+}
+EXPORT_SYMBOL_GPL(vhost_add_used);
+
+static int __vhost_add_used_n(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ unsigned count)
+{
+ vring_used_elem_t __user *used;
+ u16 old, new;
+ int start;
+
+ start = vq->last_used_idx & (vq->num - 1);
+ used = vq->used->ring + start;
+ if (vhost_put_used(vq, heads, start, count)) {
+ vq_err(vq, "Failed to write used");
+ return -EFAULT;
+ }
+ if (unlikely(vq->log_used)) {
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ /* Log used ring entry write. */
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ count * sizeof *used);
+ }
+ old = vq->last_used_idx;
+ new = (vq->last_used_idx += count);
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
+ return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ unsigned count)
+{
+ int start, n, r;
+
+ start = vq->last_used_idx & (vq->num - 1);
+ n = vq->num - start;
+ if (n < count) {
+ r = __vhost_add_used_n(vq, heads, n);
+ if (r < 0)
+ return r;
+ heads += n;
+ count -= n;
+ }
+ r = __vhost_add_used_n(vq, heads, count);
+
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ if (vhost_put_used_idx(vq)) {
+ vq_err(vq, "Failed to increment used idx");
+ return -EFAULT;
+ }
+ if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
+ /* Log used index update. */
+ log_used(vq, offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ return r;
+}
+EXPORT_SYMBOL_GPL(vhost_add_used_n);
+
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __u16 old, new;
+ __virtio16 event;
+ bool v;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
+
+ if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ unlikely(vq->avail_idx == vq->last_avail_idx))
+ return true;
+
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
+ __virtio16 flags;
+ if (vhost_get_avail_flags(vq, &flags)) {
+ vq_err(vq, "Failed to get flags");
+ return true;
+ }
+ return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
+ }
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq->last_used_idx;
+ vq->signalled_used_valid = true;
+
+ if (unlikely(!v))
+ return true;
+
+ if (vhost_get_used_event(vq, &event)) {
+ vq_err(vq, "Failed to get used event idx");
+ return true;
+ }
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ /* Signal the Guest, telling it we used something up. */
+ if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+ eventfd_signal(vq->call_ctx.ctx, 1);
+}
+EXPORT_SYMBOL_GPL(vhost_signal);
+
+/* And here's the combo meal deal. Supersize me! */
+void vhost_add_used_and_signal(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ unsigned int head, int len)
+{
+ vhost_add_used(vq, head, len);
+ vhost_signal(dev, vq);
+}
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
+
+/* multi-buffer version of vhost_add_used_and_signal */
+void vhost_add_used_and_signal_n(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads, unsigned count)
+{
+ vhost_add_used_n(vq, heads, count);
+ vhost_signal(dev, vq);
+}
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
+
+/* return true if we're sure that available ring is empty */
+bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __virtio16 avail_idx;
+ int r;
+
+ if (vq->avail_idx != vq->last_avail_idx)
+ return false;
+
+ r = vhost_get_avail_idx(vq, &avail_idx);
+ if (unlikely(r))
+ return false;
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+
+ return vq->avail_idx == vq->last_avail_idx;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
+/* OK, now we need to know about added descriptors. */
+bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __virtio16 avail_idx;
+ int r;
+
+ if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
+ return false;
+ vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
+ r = vhost_update_used_flags(vq);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ } else {
+ r = vhost_update_avail_event(vq);
+ if (r) {
+ vq_err(vq, "Failed to update avail event index at %p: %d\n",
+ vhost_avail_event(vq), r);
+ return false;
+ }
+ }
+ /* They could have slipped one in as we were doing that: make
+ * sure it's written, then check again. */
+ smp_mb();
+ r = vhost_get_avail_idx(vq, &avail_idx);
+ if (r) {
+ vq_err(vq, "Failed to check avail idx at %p: %d\n",
+ &vq->avail->idx, r);
+ return false;
+ }
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+
+ return vq->avail_idx != vq->last_avail_idx;
+}
+EXPORT_SYMBOL_GPL(vhost_enable_notify);
+
+/* We don't need to be notified again. */
+void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ int r;
+
+ if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
+ return;
+ vq->used_flags |= VRING_USED_F_NO_NOTIFY;
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
+ r = vhost_update_used_flags(vq);
+ if (r)
+ vq_err(vq, "Failed to disable notification at %p: %d\n",
+ &vq->used->flags, r);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
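+
+/* A minimal sketch (not used by the core) of how a backend's kick handler
+ * typically combines the helpers above: disable notifications, drain the
+ * available ring, publish used entries and signal the guest.  The name
+ * example_handle_kick() and the zero "len" are illustrative; real backends
+ * (net, scsi, vsock) do their own I/O between fetching and completing.
+ */
+static inline void example_handle_kick(struct vhost_dev *dev,
+				       struct vhost_virtqueue *vq)
+{
+	unsigned int out, in;
+	int head;
+
+	mutex_lock(&vq->mutex);
+	vhost_disable_notify(dev, vq);
+
+	for (;;) {
+		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+					 &out, &in, NULL, NULL);
+		if (head < 0)
+			break;
+		if (head == vq->num) {
+			/* Ring empty: re-enable notifications, then recheck
+			 * in case buffers were added meanwhile. */
+			if (unlikely(vhost_enable_notify(dev, vq))) {
+				vhost_disable_notify(dev, vq);
+				continue;
+			}
+			break;
+		}
+		/* ...consume vq->iov[0..out+in) here; len 0 means nothing
+		 * was written back into the writable part of the buffer. */
+		vhost_add_used_and_signal(dev, vq, head, 0);
+	}
+
+	mutex_unlock(&vq->mutex);
+}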
+
+/* Create a new message. */
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+{
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ /* Make sure all padding within the structure is initialized. */
+ memset(&node->msg, 0, sizeof node->msg);
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_new_msg);
+
+void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
+ struct vhost_msg_node *node)
+{
+ spin_lock(&dev->iotlb_lock);
+ list_add_tail(&node->node, head);
+ spin_unlock(&dev->iotlb_lock);
+
+ wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
+
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head)
+{
+ struct vhost_msg_node *node = NULL;
+
+ spin_lock(&dev->iotlb_lock);
+ if (!list_empty(head)) {
+ node = list_first_entry(head, struct vhost_msg_node,
+ node);
+ list_del(&node->node);
+ }
+ spin_unlock(&dev->iotlb_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
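+
+/* Sketch of the message-queue pattern served by the two helpers above: an
+ * IOTLB miss is reported to userspace by queueing a node on dev->read_list,
+ * which the chardev read path later dequeues.  This is a simplified
+ * illustration; the real miss path also selects the v2 message format when
+ * VHOST_BACKEND_F_IOTLB_MSG_V2 has been negotiated.
+ */
+static inline void example_report_iotlb_miss(struct vhost_virtqueue *vq,
+					     u64 iova, u8 perm)
+{
+	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
+
+	if (!node)
+		return;
+
+	node->msg.iotlb.type = VHOST_IOTLB_MISS;
+	node->msg.iotlb.iova = iova;
+	node->msg.iotlb.perm = perm;
+	vhost_enqueue_msg(vq->dev, &vq->dev->read_list, node);
+}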
+
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_backend_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
+
+static int __init vhost_init(void)
+{
+ return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
new file mode 100644
index 000000000..5e17c4aa7
--- /dev/null
+++ b/drivers/vhost/vhost.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VHOST_H
+#define _VHOST_H
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/atomic.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
+
+struct vhost_work;
+typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+
+#define VHOST_WORK_QUEUED 1
+struct vhost_work {
+ struct llist_node node;
+ vhost_work_fn_t fn;
+ unsigned long flags;
+};
+
+/* Poll a file (eventfd or socket) */
+/* Note: there's nothing vhost specific about this structure. */
+struct vhost_poll {
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_entry_t wait;
+ struct vhost_work work;
+ __poll_t mask;
+ struct vhost_dev *dev;
+};
+
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+bool vhost_has_work(struct vhost_dev *dev);
+
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+ __poll_t mask, struct vhost_dev *dev);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
+void vhost_poll_stop(struct vhost_poll *poll);
+void vhost_poll_queue(struct vhost_poll *poll);
+void vhost_dev_flush(struct vhost_dev *dev);
+
+struct vhost_log {
+ u64 addr;
+ u64 len;
+};
+
+enum vhost_uaddr_type {
+ VHOST_ADDR_DESC = 0,
+ VHOST_ADDR_AVAIL = 1,
+ VHOST_ADDR_USED = 2,
+ VHOST_NUM_ADDRS = 3,
+};
+
+struct vhost_vring_call {
+ struct eventfd_ctx *ctx;
+ struct irq_bypass_producer producer;
+};
+
+/* The virtqueue structure describes a queue attached to a device. */
+struct vhost_virtqueue {
+ struct vhost_dev *dev;
+
+ /* The actual ring of buffers. */
+ struct mutex mutex;
+ unsigned int num;
+ vring_desc_t __user *desc;
+ vring_avail_t __user *avail;
+ vring_used_t __user *used;
+ const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
+ struct file *kick;
+ struct vhost_vring_call call_ctx;
+ struct eventfd_ctx *error_ctx;
+ struct eventfd_ctx *log_ctx;
+
+ struct vhost_poll poll;
+
+ /* The routine to call when the Guest pings us, or timeout. */
+ vhost_work_fn_t handle_kick;
+
+ /* Last available index we saw.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
+ u16 last_avail_idx;
+
+ /* Caches available index value from user. */
+ u16 avail_idx;
+
+ /* Last index we used.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
+ u16 last_used_idx;
+
+ /* Used flags */
+ u16 used_flags;
+
+ /* Last used index value we have signalled on */
+ u16 signalled_used;
+
+ /* True if signalled_used is valid */
+ bool signalled_used_valid;
+
+ /* Log writes to used structure. */
+ bool log_used;
+ u64 log_addr;
+
+ struct iovec iov[UIO_MAXIOV];
+ struct iovec iotlb_iov[64];
+ struct iovec *indirect;
+ struct vring_used_elem *heads;
+ /* Protected by virtqueue mutex. */
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
+ void *private_data;
+ u64 acked_features;
+ u64 acked_backend_features;
+ /* Log write descriptors */
+ void __user *log_base;
+ struct vhost_log *log;
+ struct iovec log_iov[64];
+
+ /* Ring endianness. Defaults to legacy native endianness.
+ * Set to true when starting a modern virtio device. */
+ bool is_le;
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+ /* Ring endianness requested by userspace for cross-endian support. */
+ bool user_be;
+#endif
+ u32 busyloop_timeout;
+};
+
+struct vhost_msg_node {
+ union {
+ struct vhost_msg msg;
+ struct vhost_msg_v2 msg_v2;
+ };
+ struct vhost_virtqueue *vq;
+ struct list_head node;
+};
+
+struct vhost_dev {
+ struct mm_struct *mm;
+ struct mutex mutex;
+ struct vhost_virtqueue **vqs;
+ int nvqs;
+ struct eventfd_ctx *log_ctx;
+ struct llist_head work_list;
+ struct task_struct *worker;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
+ spinlock_t iotlb_lock;
+ struct list_head read_list;
+ struct list_head pending_list;
+ wait_queue_head_t wait;
+ int iov_limit;
+ int weight;
+ int byte_weight;
+ u64 kcov_handle;
+ bool use_worker;
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg);
+};
+
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+ int nvqs, int iov_limit, int weight, int byte_weight,
+ bool use_worker,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg));
+long vhost_dev_set_owner(struct vhost_dev *dev);
+bool vhost_dev_has_owner(struct vhost_dev *dev);
+long vhost_dev_check_owner(struct vhost_dev *);
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
+void vhost_dev_cleanup(struct vhost_dev *);
+void vhost_dev_stop(struct vhost_dev *);
+long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
+long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+bool vhost_log_access_ok(struct vhost_dev *);
+void vhost_clear_msg(struct vhost_dev *dev);
+
+int vhost_get_vq_desc(struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
+void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
+int vhost_vq_init_access(struct vhost_virtqueue *);
+int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
+int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
+ unsigned count);
+void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
+ unsigned int id, int len);
+void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
+ struct vring_used_elem *heads, unsigned count);
+void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
+void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len,
+ struct iovec *iov, int count);
+int vq_meta_prefetch(struct vhost_virtqueue *vq);
+
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+void vhost_enqueue_msg(struct vhost_dev *dev,
+ struct list_head *head,
+ struct vhost_msg_node *node);
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait);
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock);
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
+
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map);
+
+#define vq_err(vq, fmt, ...) do { \
+ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ if ((vq)->error_ctx) \
+ eventfd_signal((vq)->error_ctx, 1);\
+ } while (0)
+
+enum {
+ VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) |
+ (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1)
+};
+
+/**
+ * vhost_vq_set_backend - Set backend.
+ *
+ * @vq: Virtqueue.
+ * @private_data: The private data.
+ *
+ * Context: Need to call with vq->mutex acquired.
+ */
+static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
+ void *private_data)
+{
+ vq->private_data = private_data;
+}
+
+/**
+ * vhost_vq_get_backend - Get backend.
+ *
+ * @vq: Virtqueue.
+ *
+ * Context: Need to call with vq->mutex acquired.
+ * Return: Private data previously set with vhost_vq_set_backend.
+ */
+static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
+{
+ return vq->private_data;
+}
+
+static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_features & (1ULL << bit);
+}
+
+static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_backend_features & (1ULL << bit);
+}
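+
+/*
+ * Illustrative pairing of the helpers above (a sketch, not a prescribed
+ * API contract): a backend handler fetches its private object and checks
+ * negotiated features under the vq mutex.  "sock" is a hypothetical
+ * backend pointer; vhost_net, for instance, stores a socket here.
+ *
+ *	mutex_lock(&vq->mutex);
+ *	sock = vhost_vq_get_backend(vq);
+ *	if (sock && vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
+ *		...rely on event-index interrupt suppression...
+ *	mutex_unlock(&vq->mutex);
+ */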
+
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return vq->is_le;
+}
+#else
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return virtio_legacy_is_little_endian() || vq->is_le;
+}
+#endif
+
+/* Memory accessors */
+static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
+{
+ return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
+{
+ return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
+}
+
+static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
+{
+ return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
+{
+ return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
+}
+
+static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
+{
+ return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
+{
+ return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
+}
+#endif
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
new file mode 100644
index 000000000..10bfc5f1c
--- /dev/null
+++ b/drivers/vhost/vringh.c
@@ -0,0 +1,1509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Helpers for the host side of a virtio ring.
+ *
+ * Since these may be in userspace, we use (inline) accessors.
+ */
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/vringh.h>
+#include <linux/virtio_ring.h>
+#include <linux/kernel.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
+#endif
+#include <uapi/linux/virtio_config.h>
+
+static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
+{
+ static DEFINE_RATELIMIT_STATE(vringh_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ if (__ratelimit(&vringh_rs)) {
+ va_list ap;
+ va_start(ap, fmt);
+ printk(KERN_NOTICE "vringh:");
+ vprintk(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/* Returns vring->num if empty, -ve on error. */
+static inline int __vringh_get_head(const struct vringh *vrh,
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
+ u16 *last_avail_idx)
+{
+ u16 avail_idx, i, head;
+ int err;
+
+ err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
+ if (err) {
+ vringh_bad("Failed to access avail idx at %p",
+ &vrh->vring.avail->idx);
+ return err;
+ }
+
+ if (*last_avail_idx == avail_idx)
+ return vrh->vring.num;
+
+ /* Only get avail ring entries after they have been exposed by guest. */
+ virtio_rmb(vrh->weak_barriers);
+
+ i = *last_avail_idx & (vrh->vring.num - 1);
+
+ err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
+ if (err) {
+ vringh_bad("Failed to read head: idx %d address %p",
+ *last_avail_idx, &vrh->vring.avail->ring[i]);
+ return err;
+ }
+
+ if (head >= vrh->vring.num) {
+ vringh_bad("Guest says index %u > %u is available",
+ head, vrh->vring.num);
+ return -EINVAL;
+ }
+
+ (*last_avail_idx)++;
+ return head;
+}
+
+/**
+ * vringh_kiov_advance - skip bytes from vring_kiov
+ * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
+ * @len: the maximum length to advance
+ */
+void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
+{
+ while (len && iov->i < iov->used) {
+ size_t partlen = min(iov->iov[iov->i].iov_len, len);
+
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+
+ len -= partlen;
+ }
+}
+EXPORT_SYMBOL(vringh_kiov_advance);
+
+/* Copy some bytes to/from the iovec. Returns num copied. */
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ struct vringh_kiov *iov,
+ void *ptr, size_t len,
+ int (*xfer)(const struct vringh *vrh,
+ void *addr, void *ptr,
+ size_t len))
+{
+ int err, done = 0;
+
+ while (len && iov->i < iov->used) {
+ size_t partlen;
+
+ partlen = min(iov->iov[iov->i].iov_len, len);
+ err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
+ if (err)
+ return err;
+ done += partlen;
+ len -= partlen;
+ ptr += partlen;
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+ }
+ return done;
+}
+
+/* May reduce *len if range is shorter. */
+static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *,
+ u64, struct vringh_range *))
+{
+ if (addr < range->start || addr > range->end_incl) {
+ if (!getrange(vrh, addr, range))
+ return false;
+ }
+ BUG_ON(addr < range->start || addr > range->end_incl);
+
+ /* To end of memory? */
+ if (unlikely(addr + *len == 0)) {
+ if (range->end_incl == -1ULL)
+ return true;
+ goto truncate;
+ }
+
+ /* Otherwise, don't wrap. */
+ if (addr + *len < addr) {
+ vringh_bad("Wrapping descriptor %zu@0x%llx",
+ *len, (unsigned long long)addr);
+ return false;
+ }
+
+ if (unlikely(addr + *len - 1 > range->end_incl))
+ goto truncate;
+ return true;
+
+truncate:
+ *len = range->end_incl + 1 - addr;
+ return true;
+}
+
+static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *,
+ u64, struct vringh_range *))
+{
+ return true;
+}
+
+/* No reason for this code to be inline. */
+static int move_to_indirect(const struct vringh *vrh,
+ int *up_next, u16 *i, void *addr,
+ const struct vring_desc *desc,
+ struct vring_desc **descs, int *desc_max)
+{
+ u32 len;
+
+ /* Indirect tables can't have indirect. */
+ if (*up_next != -1) {
+ vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
+ return -EINVAL;
+ }
+
+ len = vringh32_to_cpu(vrh, desc->len);
+ if (unlikely(len % sizeof(struct vring_desc))) {
+ vringh_bad("Strange indirect len %u", desc->len);
+ return -EINVAL;
+ }
+
+ /* We will check this when we follow it! */
+ if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
+ *up_next = vringh16_to_cpu(vrh, desc->next);
+ else
+ *up_next = -2;
+ *descs = addr;
+ *desc_max = len / sizeof(struct vring_desc);
+
+ /* Now, start at the first indirect. */
+ *i = 0;
+ return 0;
+}
+
+static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
+{
+ struct kvec *new;
+ unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
+
+ if (new_num < 8)
+ new_num = 8;
+
+ flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
+ if (flag)
+ new = krealloc_array(iov->iov, new_num,
+ sizeof(struct iovec), gfp);
+ else {
+ new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
+ if (new) {
+ memcpy(new, iov->iov,
+ iov->max_num * sizeof(struct iovec));
+ flag = VRINGH_IOV_ALLOCATED;
+ }
+ }
+ if (!new)
+ return -ENOMEM;
+ iov->iov = new;
+ iov->max_num = (new_num | flag);
+ return 0;
+}
+
+static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
+ struct vring_desc **descs, int *desc_max)
+{
+ u16 i = *up_next;
+
+ *up_next = -1;
+ *descs = vrh->vring.desc;
+ *desc_max = vrh->vring.num;
+ return i;
+}
+
+static int slow_copy(struct vringh *vrh, void *dst, const void *src,
+ bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *vrh,
+ u64,
+ struct vringh_range *)),
+ bool (*getrange)(struct vringh *vrh,
+ u64 addr,
+ struct vringh_range *r),
+ struct vringh_range *range,
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
+{
+ size_t part, len = sizeof(struct vring_desc);
+
+ do {
+ u64 addr;
+ int err;
+
+ part = len;
+ addr = (u64)(unsigned long)src - range->offset;
+
+ if (!rcheck(vrh, addr, &part, range, getrange))
+ return -EINVAL;
+
+ err = copy(vrh, dst, src, part);
+ if (err)
+ return err;
+
+ dst += part;
+ src += part;
+ len -= part;
+ } while (len);
+ return 0;
+}
+
+static inline int
+__vringh_iov(struct vringh *vrh, u16 i,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
+ struct vringh_range *range,
+ bool (*getrange)(struct vringh *, u64,
+ struct vringh_range *)),
+ bool (*getrange)(struct vringh *, u64, struct vringh_range *),
+ gfp_t gfp,
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
+{
+ int err, count = 0, indirect_count = 0, up_next, desc_max;
+ struct vring_desc desc, *descs;
+ struct vringh_range range = { -1ULL, 0 }, slowrange;
+ bool slow = false;
+
+ /* We start traversing vring's descriptor table. */
+ descs = vrh->vring.desc;
+ desc_max = vrh->vring.num;
+ up_next = -1;
+
+ /* You must want something! */
+ if (WARN_ON(!riov && !wiov))
+ return -EINVAL;
+
+ if (riov)
+ riov->i = riov->used = riov->consumed = 0;
+ if (wiov)
+ wiov->i = wiov->used = wiov->consumed = 0;
+
+ for (;;) {
+ void *addr;
+ struct vringh_kiov *iov;
+ size_t len;
+
+ if (unlikely(slow))
+ err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
+ &slowrange, copy);
+ else
+ err = copy(vrh, &desc, &descs[i], sizeof(desc));
+ if (unlikely(err))
+ goto fail;
+
+ if (unlikely(desc.flags &
+ cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
+ u64 a = vringh64_to_cpu(vrh, desc.addr);
+
+ /* Make sure it's OK, and get offset. */
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, a, &len, &range, getrange)) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+ slow = true;
+ /* We need to save this range to use offset */
+ slowrange = range;
+ }
+
+ addr = (void *)(long)(a + range.offset);
+ err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
+ &descs, &desc_max);
+ if (err)
+ goto fail;
+ continue;
+ }
+
+ if (up_next == -1)
+ count++;
+ else
+ indirect_count++;
+
+ if (count > vrh->vring.num || indirect_count > desc_max) {
+ vringh_bad("Descriptor loop in %p", descs);
+ err = -ELOOP;
+ goto fail;
+ }
+
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
+ iov = wiov;
+ else {
+ iov = riov;
+ if (unlikely(wiov && wiov->used)) {
+ vringh_bad("Readable desc %p after writable",
+ &descs[i]);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (!iov) {
+ vringh_bad("Unexpected %s desc",
+ !wiov ? "writable" : "readable");
+ err = -EPROTO;
+ goto fail;
+ }
+
+ again:
+ /* Make sure it's OK, and get offset. */
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
+ getrange)) {
+ err = -EINVAL;
+ goto fail;
+ }
+ addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
+ range.offset);
+
+ if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
+ err = resize_iovec(iov, gfp);
+ if (err)
+ goto fail;
+ }
+
+ iov->iov[iov->used].iov_base = addr;
+ iov->iov[iov->used].iov_len = len;
+ iov->used++;
+
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+ desc.len = cpu_to_vringh32(vrh,
+ vringh32_to_cpu(vrh, desc.len) - len);
+ desc.addr = cpu_to_vringh64(vrh,
+ vringh64_to_cpu(vrh, desc.addr) + len);
+ goto again;
+ }
+
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
+ i = vringh16_to_cpu(vrh, desc.next);
+ } else {
+ /* Just in case we need to finish traversing above. */
+ if (unlikely(up_next > 0)) {
+ i = return_from_indirect(vrh, &up_next,
+ &descs, &desc_max);
+ slow = false;
+ indirect_count = 0;
+ } else
+ break;
+ }
+
+ if (i >= desc_max) {
+ vringh_bad("Chained index %u > %u", i, desc_max);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return err;
+}
+
+static inline int __vringh_complete(struct vringh *vrh,
+ const struct vring_used_elem *used,
+ unsigned int num_used,
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val),
+ int (*putused)(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem
+ *src, unsigned num))
+{
+ struct vring_used *used_ring;
+ int err;
+ u16 used_idx, off;
+
+ used_ring = vrh->vring.used;
+ used_idx = vrh->last_used_idx + vrh->completed;
+
+ off = used_idx % vrh->vring.num;
+
+ /* Compiler knows num_used == 1 sometimes, hence extra check */
+ if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
+ u16 part = vrh->vring.num - off;
+ err = putused(vrh, &used_ring->ring[off], used, part);
+ if (!err)
+ err = putused(vrh, &used_ring->ring[0], used + part,
+ num_used - part);
+ } else
+ err = putused(vrh, &used_ring->ring[off], used, num_used);
+
+ if (err) {
+ vringh_bad("Failed to write %u used entries %u at %p",
+ num_used, off, &used_ring->ring[off]);
+ return err;
+ }
+
+ /* Make sure buffer is written before we update index. */
+ virtio_wmb(vrh->weak_barriers);
+
+ err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
+ if (err) {
+ vringh_bad("Failed to update used index at %p",
+ &vrh->vring.used->idx);
+ return err;
+ }
+
+ vrh->completed += num_used;
+ return 0;
+}
+
+
+static inline int __vringh_need_notify(struct vringh *vrh,
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val,
+ const __virtio16 *p))
+{
+ bool notify;
+ u16 used_event;
+ int err;
+
+ /* Flush out used index update. This is paired with the
+ * barrier that the Guest executes when enabling
+ * interrupts. */
+ virtio_mb(vrh->weak_barriers);
+
+ /* Old-style, without event indices. */
+ if (!vrh->event_indices) {
+ u16 flags;
+ err = getu16(vrh, &flags, &vrh->vring.avail->flags);
+ if (err) {
+ vringh_bad("Failed to get flags at %p",
+ &vrh->vring.avail->flags);
+ return err;
+ }
+ return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
+ }
+
+ /* Modern: we know when other side wants to know. */
+ err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
+ if (err) {
+ vringh_bad("Failed to get used event idx at %p",
+ &vring_used_event(&vrh->vring));
+ return err;
+ }
+
+ /* Just in case we added so many that we wrap. */
+ if (unlikely(vrh->completed > 0xffff))
+ notify = true;
+ else
+ notify = vring_need_event(used_event,
+ vrh->last_used_idx + vrh->completed,
+ vrh->last_used_idx);
+
+ vrh->last_used_idx += vrh->completed;
+ vrh->completed = 0;
+ return notify;
+}
+
+static inline bool __vringh_notify_enable(struct vringh *vrh,
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
+{
+ u16 avail;
+
+ if (!vrh->event_indices) {
+ /* Old-school; update flags. */
+ if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
+ vringh_bad("Clearing used flags %p",
+ &vrh->vring.used->flags);
+ return true;
+ }
+ } else {
+ if (putu16(vrh, &vring_avail_event(&vrh->vring),
+ vrh->last_avail_idx) != 0) {
+ vringh_bad("Updating avail event index %p",
+ &vring_avail_event(&vrh->vring));
+ return true;
+ }
+ }
+
+ /* They could have slipped one in as we were doing that: make
+ * sure it's written, then check again. */
+ virtio_mb(vrh->weak_barriers);
+
+ if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
+ vringh_bad("Failed to check avail idx at %p",
+ &vrh->vring.avail->idx);
+ return true;
+ }
+
+ /* This is unlikely, so we just leave notifications enabled
+ * (if we're using event_indices, we'll only get one
+ * notification anyway). */
+ return avail == vrh->last_avail_idx;
+}
+
+static inline void __vringh_notify_disable(struct vringh *vrh,
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
+{
+ if (!vrh->event_indices) {
+ /* Old-school; update flags. */
+ if (putu16(vrh, &vrh->vring.used->flags,
+ VRING_USED_F_NO_NOTIFY)) {
+ vringh_bad("Setting used flags %p",
+ &vrh->vring.used->flags);
+ }
+ }
+}
+
+/* Userspace access helpers: in this case, addresses are really userspace. */
+static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
+{
+ __virtio16 v = 0;
+ int rc = get_user(v, (__force __virtio16 __user *)p);
+ *val = vringh16_to_cpu(vrh, v);
+ return rc;
+}
+
+static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
+{
+ __virtio16 v = cpu_to_vringh16(vrh, val);
+ return put_user(v, (__force __virtio16 __user *)p);
+}
+
+static inline int copydesc_user(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ return copy_from_user(dst, (__force void __user *)src, len) ?
+ -EFAULT : 0;
+}
+
+static inline int putused_user(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ return copy_to_user((__force void __user *)dst, src,
+ sizeof(*dst) * num) ? -EFAULT : 0;
+}
+
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ return copy_from_user(dst, (__force void __user *)src, len) ?
+ -EFAULT : 0;
+}
+
+static inline int xfer_to_user(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
+{
+ return copy_to_user((__force void __user *)dst, src, len) ?
+ -EFAULT : 0;
+}
+
+/**
+ * vringh_init_user - initialize a vringh for a userspace vring.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
+ *
+ * Returns an error if num is invalid: you should check pointers
+ * yourself!
+ */
+int vringh_init_user(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
+{
+ /* Sane power of 2 please! */
+ if (!num || num > 0xffff || (num & (num - 1))) {
+ vringh_bad("Bad ring size %u", num);
+ return -EINVAL;
+ }
+
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
+ vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
+ vrh->weak_barriers = weak_barriers;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ vrh->vring.num = num;
+ /* vring expects kernel addresses, but only used via accessors. */
+ vrh->vring.desc = (__force struct vring_desc *)desc;
+ vrh->vring.avail = (__force struct vring_avail *)avail;
+ vrh->vring.used = (__force struct vring_used *)used;
+ return 0;
+}
+EXPORT_SYMBOL(vringh_init_user);
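+
+/* A minimal sketch of the getrange callback that vringh_getdesc_user()
+ * (below) expects: it fills in one vringh_range covering @addr.  This
+ * identity-mapped, whole-address-space version is purely illustrative; a
+ * real backend would look @addr up in its guest memory table and return
+ * false for unmapped addresses.
+ */
+static inline bool example_getrange(struct vringh *vrh, u64 addr,
+				    struct vringh_range *r)
+{
+	r->start = 0;
+	r->end_incl = -1ULL;
+	r->offset = 0;
+	return true;
+}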
+
+/**
+ * vringh_getdesc_user - get next available descriptor from userspace ring.
+ * @vrh: the userspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @getrange: function to call to check ranges.
+ * @head: head index we received, for passing to vringh_complete_user().
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you don't have to use riov and wiov anymore, you should clean them
+ * up by calling vringh_iov_cleanup() to release the memory, even on error!
+ */
+int vringh_getdesc_user(struct vringh *vrh,
+ struct vringh_iov *riov,
+ struct vringh_iov *wiov,
+ bool (*getrange)(struct vringh *vrh,
+ u64 addr, struct vringh_range *r),
+ u16 *head)
+{
+ int err;
+
+ *head = vrh->vring.num;
+ err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ /* We need the layouts to be identical for this to work */
+ BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
+ offsetof(struct vringh_iov, iov));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
+ offsetof(struct vringh_iov, i));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
+ offsetof(struct vringh_iov, used));
+ BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
+ offsetof(struct vringh_iov, max_num));
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
+ offsetof(struct kvec, iov_base));
+ BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
+ offsetof(struct kvec, iov_len));
+ BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
+ != sizeof(((struct kvec *)NULL)->iov_base));
+ BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
+ != sizeof(((struct kvec *)NULL)->iov_len));
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
+ (struct vringh_kiov *)wiov,
+ range_check, getrange, GFP_KERNEL, copydesc_user);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_user);
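+
+/* Sketch of a single service step built from the userspace helpers above:
+ * fetch one descriptor chain, pull its readable payload into @buf, complete
+ * it with no written bytes, and report whether the other side needs a kick.
+ * example_service_one() and the zero completion length are illustrative; a
+ * real backend would also push replies via vringh_iov_push_user().
+ */
+static inline int example_service_one(struct vringh *vrh,
+				      struct vringh_iov *riov,
+				      struct vringh_iov *wiov,
+				      bool (*getrange)(struct vringh *, u64,
+						       struct vringh_range *),
+				      void *buf, size_t buflen)
+{
+	ssize_t got;
+	u16 head;
+	int err;
+
+	err = vringh_getdesc_user(vrh, riov, wiov, getrange, &head);
+	if (err != 1)
+		return err;	/* 0: ring empty, < 0: error */
+
+	got = vringh_iov_pull_user(riov, buf, buflen);
+	if (got < 0) {
+		/* Put the descriptor back so it can be retried. */
+		vringh_abandon_user(vrh, 1);
+		return (int)got;
+	}
+
+	err = vringh_complete_user(vrh, head, 0);
+	if (err)
+		return err;
+
+	/* > 0 means the other side should now be notified (e.g. eventfd). */
+	return vringh_need_notify_user(vrh);
+}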
+
+/**
+ * vringh_iov_pull_user - copy bytes from vring_iov.
+ * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
+{
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
+ dst, len, xfer_from_user);
+}
+EXPORT_SYMBOL(vringh_iov_pull_user);
+
+/**
+ * vringh_iov_push_user - copy bytes into vring_iov.
+ * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
+ (void *)src, len, xfer_to_user);
+}
+EXPORT_SYMBOL(vringh_iov_push_user);
+
+/**
+ * vringh_abandon_user - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. the number of
+ * vringh_getdesc_user() calls to undo).
+ *
+ * The next vringh_getdesc_user() will return the old descriptor(s) again.
+ */
+void vringh_abandon_user(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet. */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_user);
+
+/**
+ * vringh_complete_user - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_user.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_user() after one or more calls
+ * to this function.
+ */
+int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+ return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
+}
+EXPORT_SYMBOL(vringh_complete_user);
+
+/**
+ * vringh_complete_multi_user - we've finished with many descriptors.
+ * @vrh: the vring.
+ * @used: the head, length pairs.
+ * @num_used: the number of used elements.
+ *
+ * You should check vringh_need_notify_user() after one or more calls
+ * to this function.
+ */
+int vringh_complete_multi_user(struct vringh *vrh,
+ const struct vring_used_elem used[],
+ unsigned num_used)
+{
+ return __vringh_complete(vrh, used, num_used,
+ putu16_user, putused_user);
+}
+EXPORT_SYMBOL(vringh_complete_multi_user);
+
+/**
+ * vringh_notify_enable_user - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_user(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_user, putu16_user);
+}
+EXPORT_SYMBOL(vringh_notify_enable_user);
+
+/**
+ * vringh_notify_disable_user - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_user(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_user);
+}
+EXPORT_SYMBOL(vringh_notify_disable_user);
+
+/**
+ * vringh_need_notify_user - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_user() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_user(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_user);
+}
+EXPORT_SYMBOL(vringh_need_notify_user);
+
+/* Kernelspace access helpers. */
+static inline int getu16_kern(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
+{
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
+ return 0;
+}
+
+static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
+{
+ WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
+ return 0;
+}
+
+static inline int copydesc_kern(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
+static inline int putused_kern(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ memcpy(dst, src, num * sizeof(*dst));
+ return 0;
+}
+
+static inline int xfer_kern(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
+static inline int kern_xfer(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
+/**
+ * vringh_init_kern - initialize a vringh for a kernelspace vring.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the kernelspace descriptor pointer.
+ * @avail: the kernelspace avail pointer.
+ * @used: the kernelspace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_kern(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ /* Sane power of 2 please! */
+ if (!num || num > 0xffff || (num & (num - 1))) {
+ vringh_bad("Bad ring size %u", num);
+ return -EINVAL;
+ }
+
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
+ vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
+ vrh->weak_barriers = weak_barriers;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ vrh->vring.num = num;
+ vrh->vring.desc = desc;
+ vrh->vring.avail = avail;
+ vrh->vring.used = used;
+ return 0;
+}
+EXPORT_SYMBOL(vringh_init_kern);
+
+/**
+ * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_kern().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you don't have to use riov and wiov anymore, you should clean them
+ * up by calling vringh_kiov_cleanup() to release the memory, even on error!
+ */
+int vringh_getdesc_kern(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_kern);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_kern);
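+
+/* Sketch of a drain loop over a kernelspace ring, showing how the kiov is
+ * initialized once, reused across vringh_getdesc_kern() calls and cleaned
+ * up at the end.  It assumes a queue carrying only readable (driver to
+ * device) buffers; the name and the 8-element kvec array are illustrative.
+ */
+static inline int example_drain_kern_ring(struct vringh *vrh, void *buf,
+					  size_t buflen)
+{
+	struct vringh_kiov riov;
+	struct kvec kvec[8];
+	u16 head;
+	int err;
+
+	vringh_kiov_init(&riov, kvec, ARRAY_SIZE(kvec));
+
+	while ((err = vringh_getdesc_kern(vrh, &riov, NULL, &head,
+					  GFP_KERNEL)) == 1) {
+		ssize_t got = vringh_iov_pull_kern(&riov, buf, buflen);
+
+		if (got < 0) {
+			err = (int)got;
+			break;
+		}
+		err = vringh_complete_kern(vrh, head, 0);
+		if (err)
+			break;
+	}
+
+	vringh_kiov_cleanup(&riov);
+	return err;	/* 0 when the ring ran empty */
+}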
+
+/**
+ * vringh_iov_pull_kern - copy bytes from vring_iov.
+ * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
+{
+ return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
+}
+EXPORT_SYMBOL(vringh_iov_pull_kern);
+
+/**
+ * vringh_iov_push_kern - copy bytes into vring_iov.
+ * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
+}
+EXPORT_SYMBOL(vringh_iov_push_kern);
+
+/**
+ * vringh_abandon_kern - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. the number of
+ * vringh_getdesc_kern() calls to undo).
+ *
+ * The next vringh_getdesc_kern() will return the old descriptor(s) again.
+ */
+void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet. */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_kern);
+
+/**
+ * vringh_complete_kern - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_kern.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_kern() after one or more calls
+ * to this function.
+ */
+int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+
+ return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
+}
+EXPORT_SYMBOL(vringh_complete_kern);
+
+/**
+ * vringh_notify_enable_kern - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_kern(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
+}
+EXPORT_SYMBOL(vringh_notify_enable_kern);
+
+/**
+ * vringh_notify_disable_kern - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_kern(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_kern);
+}
+EXPORT_SYMBOL(vringh_notify_disable_kern);
+
+/**
+ * vringh_need_notify_kern - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_kern() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_kern(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_kern);
+}
+EXPORT_SYMBOL(vringh_need_notify_kern);
+
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+static int iotlb_translate(const struct vringh *vrh,
+ u64 addr, u64 len, u64 *translated,
+ struct bio_vec iov[],
+ int iov_size, u32 perm)
+{
+ struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iotlb = vrh->iotlb;
+ int ret = 0;
+ u64 s = 0, last = addr + len - 1;
+
+ spin_lock(vrh->iotlb_lock);
+
+ while (len > s) {
+ u64 size, pa, pfn;
+
+ if (unlikely(ret >= iov_size)) {
+ ret = -ENOBUFS;
+ break;
+ }
+
+ map = vhost_iotlb_itree_first(iotlb, addr, last);
+ if (!map || map->start > addr) {
+ ret = -EINVAL;
+ break;
+ } else if (!(map->perm & perm)) {
+ ret = -EPERM;
+ break;
+ }
+
+ size = map->size - addr + map->start;
+ pa = map->addr + addr - map->start;
+ pfn = pa >> PAGE_SHIFT;
+ iov[ret].bv_page = pfn_to_page(pfn);
+ iov[ret].bv_len = min(len - s, size);
+ iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ spin_unlock(vrh->iotlb_lock);
+
+ if (translated)
+ *translated = min(len, s);
+
+ return ret;
+}
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ u64 total_translated = 0;
+
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, ITER_SOURCE, iov, ret, translated);
+
+ ret = copy_from_iter(dst, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ u64 total_translated = 0;
+
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, ITER_DEST, iov, ret, translated);
+
+ ret = copy_to_iter(src, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
+{
+ struct bio_vec iov;
+ void *kaddr, *from;
+ int ret;
+
+ /* Atomic read is needed for getu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
+ &iov, 1, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ from = kaddr + iov.bv_offset;
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+ __virtio16 *p, u16 val)
+{
+ struct bio_vec iov;
+ void *kaddr, *to;
+ int ret;
+
+ /* Atomic write is needed for putu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
+ &iov, 1, VHOST_MAP_WO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ to = kaddr + iov.bv_offset;
+ WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ int size = num * sizeof(*dst);
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
+ if (ret != size)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the descriptor ring address, translated through the IOTLB on access.
+ * @avail: the avail ring address, translated through the IOTLB on access.
+ * @used: the used ring address, translated through the IOTLB on access.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ return vringh_init_kern(vrh, features, num, weak_barriers,
+ desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ * @iotlb_lock: spinlock to synchronize the iotlb accesses
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock)
+{
+ vrh->iotlb = iotlb;
+ vrh->iotlb_lock = iotlb_lock;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
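+
+/* Sketch of how the two helpers above are combined when bringing up an
+ * IOTLB-backed ring: initialize the vringh with the ring's addresses, then
+ * attach the IOTLB and its lock before any descriptors are processed.  The
+ * wrapper name and the weak_barriers choice are illustrative.
+ */
+static inline int example_setup_iotlb_ring(struct vringh *vrh, u64 features,
+					   unsigned int num,
+					   struct vhost_iotlb *iotlb,
+					   spinlock_t *iotlb_lock,
+					   struct vring_desc *desc,
+					   struct vring_avail *avail,
+					   struct vring_used *used)
+{
+	int err;
+
+	err = vringh_init_iotlb(vrh, features, num, false, desc, avail, used);
+	if (err)
+		return err;
+
+	vringh_set_iotlb(vrh, iotlb, iotlb_lock);
+	return 0;
+}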
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you don't have to use riov and wiov anymore, you should clean them
+ * up by calling vringh_kiov_cleanup() to release the memory, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_iotlb);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ void *dst, size_t len)
+{
+ return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+ struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. the number of
+ * vringh_getdesc_iotlb() calls to undo).
+ *
+ * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
+ */
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet.
+ */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+
+ return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_notify_enable_iotlb - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_iotlb(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_enable_iotlb);
+
+/**
+ * vringh_notify_disable_iotlb - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_iotlb(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_disable_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
new file mode 100644
index 000000000..1f3b89c88
--- /dev/null
+++ b/drivers/vhost/vsock.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+#include <linux/hashtable.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
+
+enum {
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
+};
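+
+/*
+ * VIRTIO_F_ACCESS_PLATFORM is offered so that the guest can negotiate an
+ * IOTLB-backed device (vhost_vsock_set_features() sets up the device IOTLB
+ * when it is acked), and VIRTIO_VSOCK_F_SEQPACKET advertises SOCK_SEQPACKET
+ * support (it sets seqpacket_allow below).
+ */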
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_MUTEX(vhost_vsock_mutex);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+
+struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
+ struct hlist_node hash;
+
+ struct vhost_work send_pkt_work;
+ struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+ u32 guest_cid;
+ bool seqpacket_allow;
+};
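+
+/*
+ * The virtqueue indices follow the guest's point of view: vqs[VSOCK_VQ_TX]
+ * carries guest->host packets and is serviced by vhost_vsock_handle_tx_kick(),
+ * while vqs[VSOCK_VQ_RX] is filled with host->guest packets taken from
+ * send_pkt_queue by vhost_transport_send_pkt_work() and
+ * vhost_vsock_handle_rx_kick().
+ */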
+
+static u32 vhost_transport_get_local_cid(void)
+{
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+/* Callers that dereference the return value must hold vhost_vsock_mutex or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid)
+ return vsock;
+	}
+
+ return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int pkts = 0, total_len = 0;
+ bool added = false;
+ bool restart_tx = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ /* Avoid further vmexits, we're already processing the virtqueue */
+ vhost_disable_notify(&vsock->dev, vq);
+
+ do {
+ struct virtio_vsock_hdr *hdr;
+ size_t iov_len, payload_len;
+ struct iov_iter iov_iter;
+ u32 flags_to_restore = 0;
+ struct sk_buff *skb;
+ unsigned out, in;
+ size_t nbytes;
+ int head;
+
+ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
+
+ if (!skb) {
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ break;
+ }
+
+ if (head == vq->num) {
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ if (out) {
+ kfree_skb(skb);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ iov_len = iov_length(&vq->iov[out], in);
+ if (iov_len < sizeof(*hdr)) {
+ kfree_skb(skb);
+ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
+ break;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
+ payload_len = skb->len;
+ hdr = virtio_vsock_hdr(skb);
+
+ /* If the packet is greater than the space available in the
+ * buffer, we split it using multiple buffers.
+ */
+ if (payload_len > iov_len - sizeof(*hdr)) {
+ payload_len = iov_len - sizeof(*hdr);
+
+ /* As we are copying pieces of a large packet's buffer into
+ * small rx buffers, the header of each packet in the rx queue
+ * is created dynamically and initialized from the current
+ * packet's header (except for the length). For SOCK_SEQPACKET
+ * we must also clear the message delimiter bit
+ * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
+ * (VIRTIO_VSOCK_SEQ_EOR) if they are set; otherwise every
+ * fragment would carry these bits. After the adjusted header
+ * has been copied to the rx buffer, the cleared bits are
+ * restored for the remaining data.
+ */
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
+ }
+ }
+
+ /* Set the correct length in the header */
+ hdr->len = cpu_to_le32(payload_len);
+
+ nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+ nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
+ if (nbytes != payload_len) {
+ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+ /* Deliver to monitoring devices all packets that we
+ * will transmit.
+ */
+ virtio_transport_deliver_tap_pkt(skb);
+
+ vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
+ added = true;
+
+ skb_pull(skb, payload_len);
+ total_len += payload_len;
+
+ /* If we didn't send all the payload we can requeue the packet
+ * to send it with the next available buffer.
+ */
+ if (skb->len > 0) {
+ hdr->flags |= cpu_to_le32(flags_to_restore);
+
+ /* We are queueing the same skb to handle
+ * the remaining bytes, and we want to deliver it
+ * to monitoring devices in the next iteration.
+ */
+ virtio_vsock_skb_clear_tap_delivered(skb);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ } else {
+ if (virtio_vsock_skb_reply(skb)) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx
+ * processing?
+ */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ consume_skb(skb);
+ }
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
+}
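+
+/*
+ * Each pass of the loop above is bounded by VHOST_VSOCK_PKT_WEIGHT packets and
+ * VHOST_VSOCK_WEIGHT bytes: when vhost_exceeds_weight() trips, it requeues the
+ * virtqueue's poll work, so the remaining packets are handled later instead of
+ * starving the other virtqueue.
+ */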
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vsock *vsock;
+
+ vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct sk_buff *skb)
+{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vhost_vsock *vsock;
+ int len = skb->len;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ if (!vsock) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
+
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ rcu_read_unlock();
+ return len;
+}
+
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct vhost_vsock *vsock;
+ int cnt = 0;
+ int ret = -ENODEV;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ if (!vsock)
+ goto out;
+
+ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+
+ if (cnt) {
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+ vhost_poll_queue(&tx_vq->poll);
+ }
+
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct sk_buff *
+vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_vsock_hdr *hdr;
+ struct iov_iter iov_iter;
+ struct sk_buff *skb;
+ size_t payload_len;
+ size_t nbytes;
+ size_t len;
+
+ if (in != 0) {
+ vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+ return NULL;
+ }
+
+ len = iov_length(vq->iov, out);
+
+ /* len contains both payload and hdr */
+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
+
+ hdr = virtio_vsock_hdr(skb);
+ nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+ sizeof(*hdr), nbytes);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ payload_len = le32_to_cpu(hdr->len);
+
+ /* No payload */
+ if (!payload_len)
+ return skb;
+
+ /* The pkt is too big or the length in the header is invalid */
+ if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
+ payload_len + sizeof(*hdr) > len) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ virtio_vsock_skb_rx_put(skb);
+
+ nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
+ if (nbytes != payload_len) {
+ vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
+ payload_len, nbytes);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < vq->num;
+}
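+
+/*
+ * queued_replies flow control: vhost_transport_send_pkt() increments the
+ * counter for every reply packet it queues, vhost_transport_do_send_pkt()
+ * decrements it when a reply has been fully transmitted (setting restart_tx
+ * once room frees up again), and vhost_transport_cancel_pkt() subtracts any
+ * purged replies. Once the counter reaches the tx virtqueue size, the check
+ * above makes the tx handler stop, so a guest cannot force the host to queue
+ * an unbounded number of replies.
+ */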
+
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .module = THIS_MODULE,
+
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vhost_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
+
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct vhost_vsock *vsock;
+ bool seqpacket_allow = false;
+
+ rcu_read_lock();
+ vsock = vhost_vsock_get(remote_cid);
+
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+ int head, pkts = 0, total_len = 0;
+ unsigned int out, in;
+ struct sk_buff *skb;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&vsock->dev, vq);
+ do {
+ struct virtio_vsock_hdr *hdr;
+
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0)
+ break;
+
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ skb = vhost_vsock_alloc_skb(vq, out, in);
+ if (!skb) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+ total_len += sizeof(*hdr) + skb->len;
+
+ /* Deliver to monitoring devices all received packets */
+ virtio_transport_deliver_tap_pkt(skb);
+
+ hdr = virtio_vsock_hdr(skb);
+
+ /* Only accept correctly addressed packets */
+ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
+ le64_to_cpu(hdr->dst_cid) ==
+ vhost_transport_get_local_cid())
+ virtio_transport_recv_pkt(&vhost_transport, skb);
+ else
+ kfree_skb(skb);
+
+ vhost_add_used(vq, head, 0);
+ added = true;
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+
+no_more_replies:
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq;
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ goto err_vq;
+ }
+
+ if (!vhost_vq_get_backend(vq)) {
+ vhost_vq_set_backend(vq, vsock);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ /* Some packets may have been queued before the device was started,
+ * let's kick the send worker to send them.
+ */
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
+{
+ size_t i;
+ int ret = 0;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ if (check_owner) {
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+ struct vhost_virtqueue **vqs;
+ struct vhost_vsock *vsock;
+ int ret;
+
+ /* This struct is large and allocation could fail, fall back to vmalloc
+ * if there is no other way.
+ */
+ vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!vsock)
+ return -ENOMEM;
+
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->guest_cid = 0; /* no CID assigned yet */
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT, true, NULL);
+
+ file->private_data = vsock;
+ skb_queue_head_init(&vsock->send_pkt_queue);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+ return 0;
+
+out:
+ vhost_vsock_free(vsock);
+ return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+ vhost_dev_flush(&vsock->dev);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk_error_report(sk);
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+ struct vhost_vsock *vsock = file->private_data;
+
+ mutex_lock(&vhost_vsock_mutex);
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+ mutex_unlock(&vhost_vsock_mutex);
+
+ /* Wait for other CPUs to finish using vsock */
+ synchronize_rcu();
+
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(&vhost_transport.transport,
+ vhost_vsock_reset_orphans);
+
+ /* Don't check the owner: we are in the release path, so we need to
+ * stop the vsock device in any case. vhost_vsock_stop() cannot fail
+ * here, so we don't need to check its return code.
+ */
+ vhost_vsock_stop(vsock, false);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+
+ vhost_dev_cleanup(&vsock->dev);
+ kfree(vsock->dev.vqs);
+ vhost_vsock_free(vsock);
+ return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+ struct vhost_vsock *other;
+
+ /* Refuse reserved CIDs */
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
+ return -EINVAL;
+
+ /* Refuse if CID is assigned to the guest->host transport (i.e. nested
+ * VM), to make the loopback work.
+ */
+ if (vsock_find_cid(guest_cid))
+ return -EADDRINUSE;
+
+ /* Refuse if CID is already in use */
+ mutex_lock(&vhost_vsock_mutex);
+ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ mutex_unlock(&vhost_vsock_mutex);
+ return -EADDRINUSE;
+ }
+
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+
+ vsock->guest_cid = guest_cid;
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
+ mutex_unlock(&vhost_vsock_mutex);
+
+ return 0;
+}
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_VSOCK_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vsock->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vsock->dev)) {
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
+ }
+
+ if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vsock *vsock = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 guest_cid;
+ u64 features;
+ int start;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_VSOCK_SET_GUEST_CID:
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+ return -EFAULT;
+ return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock, true);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
+ default:
+ mutex_lock(&vsock->dev.mutex);
+ r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+ else
+ vhost_vsock_flush(vsock);
+ mutex_unlock(&vsock->dev.mutex);
+ return r;
+ }
+}
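+
+/*
+ * Rough userspace setup sequence (illustrative only; the exact ordering is up
+ * to the VMM): open /dev/vhost-vsock, issue VHOST_SET_OWNER and
+ * VHOST_SET_FEATURES, assign a CID with VHOST_VSOCK_SET_GUEST_CID, configure
+ * both virtqueues with the generic VHOST_SET_VRING_* ioctls (dispatched to
+ * vhost_vring_ioctl() above), and finally set VHOST_VSOCK_SET_RUNNING to 1.
+ * Setting it back to 0, or closing the fd, stops the device.
+ */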
+
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
+static const struct file_operations vhost_vsock_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vsock_dev_open,
+ .release = vhost_vsock_dev_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = vhost_vsock_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+ .minor = VHOST_VSOCK_MINOR,
+ .name = "vhost-vsock",
+ .fops = &vhost_vsock_fops,
+};
+
+static int __init vhost_vsock_init(void)
+{
+ int ret;
+
+ ret = vsock_core_register(&vhost_transport.transport,
+ VSOCK_TRANSPORT_F_H2G);
+ if (ret < 0)
+ return ret;
+
+ ret = misc_register(&vhost_vsock_misc);
+ if (ret) {
+ vsock_core_unregister(&vhost_transport.transport);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vhost_vsock_exit(void)
+{
+ misc_deregister(&vhost_vsock_misc);
+ vsock_core_unregister(&vhost_transport.transport);
+}
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock");
+MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
+MODULE_ALIAS("devname:vhost-vsock");