Diffstat (limited to 'drivers/hv')
-rw-r--r--   drivers/hv/Kconfig                   33
-rw-r--r--   drivers/hv/Makefile                  16
-rw-r--r--   drivers/hv/channel.c               1352
-rw-r--r--   drivers/hv/channel_mgmt.c          1619
-rw-r--r--   drivers/hv/connection.c             549
-rw-r--r--   drivers/hv/hv.c                     442
-rw-r--r--   drivers/hv/hv_balloon.c            2122
-rw-r--r--   drivers/hv/hv_common.c              321
-rw-r--r--   drivers/hv/hv_debugfs.c             178
-rw-r--r--   drivers/hv/hv_fcopy.c               427
-rw-r--r--   drivers/hv/hv_kvp.c                 824
-rw-r--r--   drivers/hv/hv_snapshot.c            458
-rw-r--r--   drivers/hv/hv_trace.c                 6
-rw-r--r--   drivers/hv/hv_trace.h               361
-rw-r--r--   drivers/hv/hv_trace_balloon.h        48
-rw-r--r--   drivers/hv/hv_util.c                793
-rw-r--r--   drivers/hv/hv_utils_transport.c     350
-rw-r--r--   drivers/hv/hv_utils_transport.h      45
-rw-r--r--   drivers/hv/hyperv_vmbus.h           482
-rw-r--r--   drivers/hv/ring_buffer.c            676
-rw-r--r--   drivers/hv/vmbus_drv.c             2828
21 files changed, 13930 insertions, 0 deletions
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
new file mode 100644
index 000000000..0747a8f1f
--- /dev/null
+++ b/drivers/hv/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Microsoft Hyper-V guest support"
+
+config HYPERV
+ tristate "Microsoft Hyper-V client drivers"
+ depends on ACPI && ((X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
+ || (ARM64 && !CPU_BIG_ENDIAN))
+ select PARAVIRT
+ select X86_HV_CALLBACK_VECTOR if X86
+ select VMAP_PFN
+ help
+ Select this option to run Linux as a Hyper-V client operating
+ system.
+
+config HYPERV_TIMER
+ def_bool HYPERV && X86
+
+config HYPERV_UTILS
+ tristate "Microsoft Hyper-V Utilities driver"
+ depends on HYPERV && CONNECTOR && NLS
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ Select this option to enable the Hyper-V Utilities.
+
+config HYPERV_BALLOON
+ tristate "Microsoft Hyper-V Balloon driver"
+ depends on HYPERV
+ select PAGE_REPORTING
+ help
+ Select this option to enable Hyper-V Balloon driver.
+
+endmenu
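+
+# Example .config fragment for a Hyper-V guest (a sketch; building these as
+# modules or built-in is the builder's choice):
+#   CONFIG_HYPERV=m
+#   CONFIG_HYPERV_UTILS=m
+#   CONFIG_HYPERV_BALLOON=m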
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
new file mode 100644
index 000000000..d76df5c8c
--- /dev/null
+++ b/drivers/hv/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_HYPERV) += hv_vmbus.o
+obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
+obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+
+CFLAGS_hv_trace.o = -I$(src)
+CFLAGS_hv_balloon.o = -I$(src)
+
+hv_vmbus-y := vmbus_drv.o \
+ hv.o connection.o channel.o \
+ channel_mgmt.o ring_buffer.o hv_trace.o
+hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
+hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
+
+# Code that must be built-in
+obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
new file mode 100644
index 000000000..56f7e06c6
--- /dev/null
+++ b/drivers/hv/channel.c
@@ -0,0 +1,1352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/hyperv.h>
+#include <linux/uio.h>
+#include <linux/interrupt.h>
+#include <linux/set_memory.h>
+#include <asm/page.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv_vmbus.h"
+
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement), however, the hypervisor only
+ * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice the gap size.
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
+{
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ return size;
+ case HV_GPADL_RING:
+ /* The size of a ringbuffer must be page-aligned */
+ BUG_ON(size % PAGE_SIZE);
+ /*
+ * Two things to notice here:
+ * 1) We're processing two ring buffers as a unit
+ * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
+ * the first guest-size page of each of the two ring buffers.
+ * So we effectively subtract out two guest-size pages, and add
+ * back two Hyper-V size pages.
+ */
+ return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ }
+ BUG();
+ return 0;
+}
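+
+/*
+ * For example, on an ARM64 guest with 64 KiB pages (PAGE_SIZE = 64 KiB,
+ * HV_HYP_PAGE_SIZE = 4 KiB), a RING gpadl whose two rings occupy 256 KiB of
+ * guest memory is reported to Hyper-V as 256 KiB - 2 * (64 KiB - 4 KiB) =
+ * 136 KiB, i.e. 34 Hyper-V pages. On x86, where PAGE_SIZE equals
+ * HV_HYP_PAGE_SIZE, the two sizes are identical.
+ */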
+
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in units of
+ * HV_HYP_PAGE) in a ring gpadl based on the
+ * offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
+{
+
+ /*
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+ * header (because of the alignment requirement), however, the
+ * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
+ * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+ *
+ * To calculate the effective send offset in the gpadl, we need to
+ * subtract this gap.
+ */
+ return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
+}
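+
+/*
+ * For example, with 64 KiB guest pages and @offset = 128 KiB (two guest
+ * pages), the result is (131072 - 61440) >> 12 = 17: only 4 KiB of the
+ * first ring's 64 KiB header page is described to the hypervisor, so the
+ * second ring begins 17 Hyper-V pages into the gpadl rather than 32.
+ */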
+
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
+ * the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ * @i: the index
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset, int i)
+{
+ int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+ unsigned long delta = 0UL;
+
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ break;
+ case HV_GPADL_RING:
+ if (i == 0)
+ delta = 0;
+ else if (i <= send_idx)
+ delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+ else
+ delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
+}
+
+/*
+ * vmbus_setevent - Trigger an event notification on the specified channel.
+ */
+void vmbus_setevent(struct vmbus_channel *channel)
+{
+ struct hv_monitor_page *monitorpage;
+
+ trace_vmbus_setevent(channel);
+
+ /*
+ * For channels marked as being in "low latency" mode,
+ * bypass the monitor page mechanism.
+ */
+ if (channel->offermsg.monitor_allocated && !channel->low_latency) {
+ vmbus_send_interrupt(channel->offermsg.child_relid);
+
+ /* Get the child to parent monitor page */
+ monitorpage = vmbus_connection.monitor_pages[1];
+
+ sync_set_bit(channel->monitor_bit,
+ (unsigned long *)&monitorpage->trigger_group
+ [channel->monitor_grp].pending);
+
+ } else {
+ vmbus_set_event(channel);
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
+
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
+{
+ hv_ringbuffer_cleanup(&channel->outbound);
+ hv_ringbuffer_cleanup(&channel->inbound);
+
+ if (channel->ringbuffer_page) {
+ __free_pages(channel->ringbuffer_page,
+ get_order(channel->ringbuffer_pagecount
+ << PAGE_SHIFT));
+ channel->ringbuffer_page = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
+
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+ u32 send_size, u32 recv_size)
+{
+ struct page *page;
+ int order;
+
+ if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
+ return -EINVAL;
+
+ /* Allocate the ring buffer */
+ order = get_order(send_size + recv_size);
+ page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+ GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ return -ENOMEM;
+
+ newchannel->ringbuffer_page = page;
+ newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+ newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
+
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id)
+{
+ struct vmbus_channel_tl_connect_request conn_msg;
+ int ret;
+
+ memset(&conn_msg, 0, sizeof(conn_msg));
+ conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+ conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+ conn_msg.host_service_id = *shv_host_servie_id;
+
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
+static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.target_vp = target_vp;
+
+ ret = vmbus_post_msg(&msg, sizeof(msg), true);
+ trace_vmbus_send_modifychannel(&msg, ret);
+
+ return ret;
+}
+
+static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+ int ret;
+
+ info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_modifychannel),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_completion(&info->waitevent);
+ info->waiting_channel = channel;
+
+ msg = (struct vmbus_channel_modifychannel *)info->msg;
+ msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->target_vp = target_vp;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg, sizeof(*msg), true);
+ trace_vmbus_send_modifychannel(msg, ret);
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ goto free_info;
+ }
+
+ /*
+ * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
+ * the mutex and be unable to signal the completion.
+ *
+ * See the caller target_cpu_store() for information about the usage of the
+ * mutex.
+ */
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ wait_for_completion(&info->waitevent);
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (info->response.modify_response.status)
+ ret = -EAGAIN;
+
+free_info:
+ kfree(info);
+ return ret;
+}
+
+/*
+ * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
+ *
+ * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. When VMbus version 5.3
+ * or later is negotiated, Hyper-V always sends an ACK in response to such a
+ * message. For VMbus version 5.2 and earlier, it never sends an ACK. Without
+ * an ACK, we cannot know when the host will stop interrupting the "old"
+ * vCPU and start interrupting the "new" vCPU for the given channel.
+ *
+ * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
+ * VERSION_WIN10_V4_1.
+ */
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
+{
+ if (vmbus_proto_version >= VERSION_WIN10_V5_3)
+ return send_modifychannel_with_ack(channel, target_vp);
+ return send_modifychannel_without_ack(channel, target_vp);
+}
+EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
+
+/*
+ * create_gpadl_header - Creates a gpadl for the specified buffer
+ */
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
+ struct vmbus_channel_msginfo **msginfo)
+{
+ int i;
+ int pagecount;
+ struct vmbus_channel_gpadl_header *gpadl_header;
+ struct vmbus_channel_gpadl_body *gpadl_body;
+ struct vmbus_channel_msginfo *msgheader;
+ struct vmbus_channel_msginfo *msgbody = NULL;
+ u32 msgsize;
+
+ int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
+
+ pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
+
+ /* do we need a gpadl body msg */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_header) -
+ sizeof(struct gpa_range);
+ pfncount = pfnsize / sizeof(u64);
+
+ if (pagecount > pfncount) {
+ /* we need a gpadl body */
+ /* fill in the header */
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ goto nomem;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
+ pfncount = pfnsize / sizeof(u64);
+
+ /* fill in the body */
+ while (pfnleft) {
+ if (pfnleft > pfncount)
+ pfncurr = pfncount;
+ else
+ pfncurr = pfnleft;
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
+ /*
+ * Free up all the allocated messages.
+ */
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+
+ goto nomem;
+ }
+
+ msgbody->msgsize = msgsize;
+ gpadl_body =
+ (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+ * be 64-bit. This is governed by the guest/host protocol,
+ * and so the hypervisor guarantees that this is OK.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry,
+ &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
+ }
+ } else {
+ /* everything fits in a header */
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pagecount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (msgheader == NULL)
+ goto nomem;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pagecount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+
+ *msginfo = msgheader;
+ }
+
+ return 0;
+nomem:
+ kfree(msgheader);
+ kfree(msgbody);
+ return -ENOMEM;
+}
+
+/*
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
+ *
+ * @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts,
+ * should be 0 for BUFFER type gpadl
+ * @gpadl: the returned vmbus_gpadl describing the established GPADL
+ */
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
+ struct vmbus_gpadl *gpadl)
+{
+ struct vmbus_channel_gpadl_header *gpadlmsg;
+ struct vmbus_channel_gpadl_body *gpadl_body;
+ struct vmbus_channel_msginfo *msginfo = NULL;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
+ struct list_head *curr;
+ u32 next_gpadl_handle;
+ unsigned long flags;
+ int ret = 0;
+
+ next_gpadl_handle =
+ (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
+
+ ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
+ if (ret)
+ return ret;
+
+ ret = set_memory_decrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+ if (ret) {
+ dev_warn(&channel->device_obj->device,
+ "Failed to set host visibility for new GPADL %d.\n",
+ ret);
+ return ret;
+ }
+
+ init_completion(&msginfo->waitevent);
+ msginfo->waiting_channel = channel;
+
+ gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
+ gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
+ gpadlmsg->child_relid = channel->offermsg.child_relid;
+ gpadlmsg->gpadl = next_gpadl_handle;
+
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
+ sizeof(*msginfo), true);
+
+ trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
+ if (ret != 0)
+ goto cleanup;
+
+ list_for_each(curr, &msginfo->submsglist) {
+ submsginfo = (struct vmbus_channel_msginfo *)curr;
+ gpadl_body =
+ (struct vmbus_channel_gpadl_body *)submsginfo->msg;
+
+ gpadl_body->header.msgtype =
+ CHANNELMSG_GPADL_BODY;
+ gpadl_body->gpadl = next_gpadl_handle;
+
+ ret = vmbus_post_msg(gpadl_body,
+ submsginfo->msgsize - sizeof(*submsginfo),
+ true);
+
+ trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
+ if (ret != 0)
+ goto cleanup;
+
+ }
+ wait_for_completion(&msginfo->waitevent);
+
+ if (msginfo->response.gpadl_created.creation_status != 0) {
+ pr_err("Failed to establish GPADL: err = 0x%x\n",
+ msginfo->response.gpadl_created.creation_status);
+
+ ret = -EDQUOT;
+ goto cleanup;
+ }
+
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ /* At this point, we received the gpadl created msg */
+ gpadl->gpadl_handle = gpadlmsg->gpadl;
+ gpadl->buffer = kbuffer;
+ gpadl->size = size;
+
+
+cleanup:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+
+ kfree(msginfo);
+
+ if (ret)
+ set_memory_encrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+
+ return ret;
+}
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl: the returned vmbus_gpadl describing the established GPADL
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 size, struct vmbus_gpadl *gpadl)
+{
+ return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+ 0U, gpadl);
+}
+EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+
+/**
+ * request_arr_init - Allocates memory for the requestor array. Each slot
+ * keeps track of the next available slot in the array. Initially, each
+ * slot points to the next one (as in a Linked List). The last slot
+ * does not point to anything, so its value is U64_MAX by default.
+ * @size: The size of the array
+ */
+static u64 *request_arr_init(u32 size)
+{
+ int i;
+ u64 *req_arr;
+
+ req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
+ if (!req_arr)
+ return NULL;
+
+ for (i = 0; i < size - 1; i++)
+ req_arr[i] = i + 1;
+
+ /* Last slot (no more available slots) */
+ req_arr[i] = U64_MAX;
+
+ return req_arr;
+}
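+
+/*
+ * For example, request_arr_init(4) yields { 1, 2, 3, U64_MAX }: slot 0 is
+ * the head of the free list, each slot holds the index of the next free
+ * slot, and U64_MAX terminates the list.
+ */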
+
+/*
+ * vmbus_alloc_requestor - Initializes @rqstor's fields.
+ * Index 0 is the first free slot
+ * @size: Size of the requestor array
+ */
+static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
+{
+ u64 *rqst_arr;
+ unsigned long *bitmap;
+
+ rqst_arr = request_arr_init(size);
+ if (!rqst_arr)
+ return -ENOMEM;
+
+ bitmap = bitmap_zalloc(size, GFP_KERNEL);
+ if (!bitmap) {
+ kfree(rqst_arr);
+ return -ENOMEM;
+ }
+
+ rqstor->req_arr = rqst_arr;
+ rqstor->req_bitmap = bitmap;
+ rqstor->size = size;
+ rqstor->next_request_id = 0;
+ spin_lock_init(&rqstor->req_lock);
+
+ return 0;
+}
+
+/*
+ * vmbus_free_requestor - Frees memory allocated for @rqstor
+ * @rqstor: Pointer to the requestor struct
+ */
+static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
+{
+ kfree(rqstor->req_arr);
+ bitmap_free(rqstor->req_bitmap);
+}
+
+static int __vmbus_open(struct vmbus_channel *newchannel,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ struct vmbus_channel_open_channel *open_msg;
+ struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
+ unsigned long flags;
+ int err;
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
+ return -EINVAL;
+
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+ if (newchannel->state != CHANNEL_OPEN_STATE)
+ return -EINVAL;
+
+ /* Create and init requestor */
+ if (newchannel->rqstor_size) {
+ if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
+ return -ENOMEM;
+ }
+
+ newchannel->state = CHANNEL_OPENING_STATE;
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
+
+ if (!newchannel->max_pkt_size)
+ newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
+
+ /* Establish the gpadl for the ring buffer */
+ newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
+
+ err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+ page_address(newchannel->ringbuffer_page),
+ (send_pages + recv_pages) << PAGE_SHIFT,
+ newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+ &newchannel->ringbuffer_gpadlhandle);
+ if (err)
+ goto error_clean_ring;
+
+ err = hv_ringbuffer_init(&newchannel->outbound,
+ page, send_pages, 0);
+ if (err)
+ goto error_free_gpadl;
+
+ err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
+ recv_pages, newchannel->max_pkt_size);
+ if (err)
+ goto error_free_gpadl;
+
+ /* Create and init the channel open message */
+ open_info = kzalloc(sizeof(*open_info) +
+ sizeof(struct vmbus_channel_open_channel),
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+ goto error_free_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+ open_info->waiting_channel = newchannel;
+
+ open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
+ open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+ open_msg->openid = newchannel->offermsg.child_relid;
+ open_msg->child_relid = newchannel->offermsg.child_relid;
+ open_msg->ringbuffer_gpadlhandle
+ = newchannel->ringbuffer_gpadlhandle.gpadl_handle;
+ /*
+ * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+ * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+ * here we calculate it into HV_HYP_PAGE.
+ */
+ open_msg->downstream_ringbuffer_pageoffset =
+ hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
+ open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
+
+ if (userdatalen)
+ memcpy(open_msg->userdata, userdata, userdatalen);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&open_info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_clean_msglist;
+ }
+
+ err = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel), true);
+
+ trace_vmbus_open(open_msg, err);
+
+ if (err != 0)
+ goto error_clean_msglist;
+
+ wait_for_completion(&open_info->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_info;
+ }
+
+ if (open_info->response.open_result.status) {
+ err = -EAGAIN;
+ goto error_free_info;
+ }
+
+ newchannel->state = CHANNEL_OPENED_STATE;
+ kfree(open_info);
+ return 0;
+
+error_clean_msglist:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_free_info:
+ kfree(open_info);
+error_free_gpadl:
+ vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
+error_clean_ring:
+ hv_ringbuffer_cleanup(&newchannel->outbound);
+ hv_ringbuffer_cleanup(&newchannel->inbound);
+ vmbus_free_requestor(&newchannel->requestor);
+ newchannel->state = CHANNEL_OPEN_STATE;
+ return err;
+}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+ u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+ recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+ onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmbus_open);
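+
+/*
+ * Typical use from a VSC driver's probe() (a sketch; the ring sizes and the
+ * placeholder my_onchannelcallback()/my_dev are the driver's choice):
+ *
+ * ret = vmbus_open(device->channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
+ * NULL, 0, my_onchannelcallback, my_dev);
+ * if (ret)
+ * return ret;
+ * ...
+ * vmbus_close(device->channel);
+ */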
+
+/*
+ * vmbus_teardown_gpadl - Teardown the specified GPADL handle
+ */
+int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
+{
+ struct vmbus_channel_gpadl_teardown *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+ int ret;
+
+ info = kzalloc(sizeof(*info) +
+ sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_completion(&info->waitevent);
+ info->waiting_channel = channel;
+
+ msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
+
+ msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->gpadl = gpadl->gpadl_handle;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (channel->rescind)
+ goto post_msg_err;
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
+ true);
+
+ trace_vmbus_teardown_gpadl(msg, ret);
+
+ if (ret)
+ goto post_msg_err;
+
+ wait_for_completion(&info->waitevent);
+
+ gpadl->gpadl_handle = 0;
+
+post_msg_err:
+ /*
+ * If the channel has been rescinded, we will be awakened by the rescind
+ * handler; set the error code to zero so we don't leak memory.
+ */
+ if (channel->rescind)
+ ret = 0;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ kfree(info);
+
+ ret = set_memory_encrypted((unsigned long)gpadl->buffer,
+ PFN_UP(gpadl->size));
+ if (ret)
+ pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+
+void vmbus_reset_channel_cb(struct vmbus_channel *channel)
+{
+ unsigned long flags;
+
+ /*
+ * vmbus_on_event(), running in the per-channel tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+ * the former is accessing channel->inbound.ring_buffer, the latter
+ * could be freeing the ring_buffer pages, so here we must stop it
+ * first.
+ *
+ * vmbus_chan_sched() might call the netvsc driver callback function
+ * that ends up scheduling NAPI work that accesses the ring buffer.
+ * At this point, we have to ensure that any such work is completed
+ * and that the channel ring buffer is no longer being accessed, cf.
+ * the calls to napi_disable() in netvsc_device_remove().
+ */
+ tasklet_disable(&channel->callback_event);
+
+ /* See the inline comments in vmbus_chan_sched(). */
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ channel->onchannel_callback = NULL;
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+
+ channel->sc_creation_callback = NULL;
+
+ /* Re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
+}
+
+static int vmbus_close_internal(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+
+ vmbus_reset_channel_cb(channel);
+
+ /*
+ * In case a device driver's probe() fails (e.g.,
+ * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
+ * rescinded later (e.g., we dynamically disable an Integrated Service
+ * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
+ * here we should skip most of the below cleanup work.
+ */
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
+
+ channel->state = CHANNEL_OPEN_STATE;
+
+ /* Send a closing message */
+
+ msg = &channel->close_msg.msg;
+
+ msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
+ true);
+
+ trace_vmbus_close_internal(msg, ret);
+
+ if (ret) {
+ pr_err("Close failed: close post msg return is %d\n", ret);
+ /*
+ * If we failed to post the close msg,
+ * it is perhaps better to leak memory.
+ */
+ }
+
+ /* Tear down the gpadl for the channel's ring buffer */
+ else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
+ ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
+ if (ret) {
+ pr_err("Close failed: teardown gpadl return %d\n", ret);
+ /*
+ * If we failed to teardown gpadl,
+ * it is perhaps better to leak memory.
+ */
+ }
+ }
+
+ if (!ret)
+ vmbus_free_requestor(&channel->requestor);
+
+ return ret;
+}
+
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
+{
+ struct vmbus_channel *cur_channel, *tmp;
+ int ret;
+
+ if (channel->primary_channel != NULL)
+ return -EINVAL;
+
+ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
+ if (cur_channel->rescind)
+ wait_for_completion(&cur_channel->rescind_event);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ if (vmbus_close_internal(cur_channel) == 0) {
+ vmbus_free_ring(cur_channel);
+
+ if (cur_channel->rescind)
+ hv_process_channel_removal(cur_channel);
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ }
+
+ /*
+ * Now close the primary.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_close_internal(channel);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ if (vmbus_disconnect_ring(channel) == 0)
+ vmbus_free_ring(channel);
+}
+EXPORT_SYMBOL_GPL(vmbus_close);
+
+/**
+ * vmbus_sendpacket_getid() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
+ * @requestid: Identifier of the request
+ * @trans_id: Identifier of the transaction associated to this request, if
+ * the send is successful; undefined, otherwise.
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
+ *
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid, u64 *trans_id,
+ enum vmbus_packet_type type, u32 flags)
+{
+ struct vmpacket_descriptor desc;
+ u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
+ u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+ struct kvec bufferlist[3];
+ u64 aligned_data = 0;
+ int num_vecs = ((bufferlen != 0) ? 3 : 1);
+
+
+ /* Setup the descriptor */
+ desc.type = type; /* VmbusPacketTypeDataInBand; */
+ desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
+ /* in 8-bytes granularity */
+ desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
+ desc.len8 = (u16)(packetlen_aligned >> 3);
+ desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
+
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
+}
+EXPORT_SYMBOL(vmbus_sendpacket_getid);
+
+/**
+ * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
+ * @requestid: Identifier of the request
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
+ *
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
+{
+ return vmbus_sendpacket_getid(channel, buffer, bufferlen,
+ requestid, NULL, type, flags);
+}
+EXPORT_SYMBOL(vmbus_sendpacket);
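+
+/*
+ * Example (sketch; &pkt and req stand in for the driver's own message and
+ * request-tracking structures): send an in-band packet and request a
+ * completion, using the address of the private request as @requestid:
+ *
+ * ret = vmbus_sendpacket(channel, &pkt, sizeof(pkt), (unsigned long)req,
+ * VM_PKT_DATA_INBAND,
+ * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ */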
+
+/*
+ * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
+ * packets using a GPADL Direct packet type. This interface allows you
+ * to control notifying the host. This will be useful for sending
+ * batched data. Also the sender can control the send flags
+ * explicitly.
+ */
+int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount, void *buffer, u32 bufferlen,
+ u64 requestid)
+{
+ int i;
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 descsize;
+ u32 packetlen;
+ u32 packetlen_aligned;
+ struct kvec bufferlist[3];
+ u64 aligned_data = 0;
+
+ if (pagecount > MAX_PAGE_BUFFER_COUNT)
+ return -EINVAL;
+
+ /*
+ * Adjust the size down since vmbus_channel_packet_page_buffer is the
+ * largest size we support
+ */
+ descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
+ ((MAX_PAGE_BUFFER_COUNT - pagecount) *
+ sizeof(struct hv_page_buffer));
+ packetlen = descsize + bufferlen;
+ packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+ /* Setup the descriptor */
+ desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
+ desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
+ desc.length8 = (u16)(packetlen_aligned >> 3);
+ desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
+ desc.reserved = 0;
+ desc.rangecount = pagecount;
+
+ for (i = 0; i < pagecount; i++) {
+ desc.range[i].len = pagebuffers[i].len;
+ desc.range[i].offset = pagebuffers[i].offset;
+ desc.range[i].pfn = pagebuffers[i].pfn;
+ }
+
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
+
+/*
+ * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
+ * using a GPADL Direct packet type.
+ * The buffer includes the vmbus descriptor.
+ */
+int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ struct vmbus_packet_mpb_array *desc,
+ u32 desc_size,
+ void *buffer, u32 bufferlen, u64 requestid)
+{
+ u32 packetlen;
+ u32 packetlen_aligned;
+ struct kvec bufferlist[3];
+ u64 aligned_data = 0;
+
+ packetlen = desc_size + bufferlen;
+ packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+ /* Setup the descriptor */
+ desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
+ desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
+ desc->length8 = (u16)(packetlen_aligned >> 3);
+ desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
+ desc->reserved = 0;
+ desc->rangecount = 1;
+
+ bufferlist[0].iov_base = desc;
+ bufferlist[0].iov_len = desc_size;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+
+ return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
+
+/**
+ * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to receive the data into.
+ * @bufferlen: Maximum size of what the buffer can hold.
+ * @buffer_actual_len: The actual size of the data after it was received.
+ * @requestid: Identifier of the request
+ * @raw: true means keep the vmpacket_descriptor header in the received data.
+ *
+ * Receives directly from the hyper-v vmbus and puts the data it received
+ * into Buffer. This will receive the data unparsed from hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+static inline int
+__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
+ bool raw)
+{
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
+
+}
+
+int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u32 *buffer_actual_len,
+ u64 *requestid)
+{
+ return __vmbus_recvpacket(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, false);
+}
+EXPORT_SYMBOL(vmbus_recvpacket);
+
+/*
+ * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
+ */
+int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u32 *buffer_actual_len,
+ u64 *requestid)
+{
+ return __vmbus_recvpacket(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, true);
+}
+EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
+
+/*
+ * vmbus_next_request_id - Returns a new request id. It is also
+ * the index at which the guest memory address is stored.
+ * Uses a spin lock to avoid race conditions.
+ * @channel: Pointer to the VMbus channel struct
+ * @rqst_addr: Guest memory address to be stored in the array
+ */
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+ unsigned long flags;
+ u64 current_id;
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ lock_requestor(channel, flags);
+ current_id = rqstor->next_request_id;
+
+ /* Requestor array is full */
+ if (current_id >= rqstor->size) {
+ unlock_requestor(channel, flags);
+ return VMBUS_RQST_ERROR;
+ }
+
+ rqstor->next_request_id = rqstor->req_arr[current_id];
+ rqstor->req_arr[current_id] = rqst_addr;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_set(rqstor->req_bitmap, current_id, 1);
+
+ unlock_requestor(channel, flags);
+
+ /*
+ * Cannot return an ID of 0, which is reserved for an unsolicited
+ * message from Hyper-V; Hyper-V does not acknowledge (respond to)
+ * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
+ * 0 sent by the guest.
+ */
+ return current_id + 1;
+}
+EXPORT_SYMBOL_GPL(vmbus_next_request_id);
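+
+/*
+ * Continuing the size-4 example above: with a freshly initialized
+ * requestor, the first call stores @rqst_addr in slot 0, advances
+ * next_request_id to 1, and returns 1 (slot index + 1). A later
+ * vmbus_request_addr_match() for trans_id 1 with a matching address
+ * returns the stored address and pushes slot 0 back onto the free list.
+ */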
+
+/* As in vmbus_request_addr_match() but without the requestor lock */
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+ u64 req_addr;
+
+ /* Check rqstor has been initialized */
+ if (!channel->rqstor_size)
+ return VMBUS_NO_RQSTOR;
+
+ /* Hyper-V can send an unsolicited message with ID of 0 */
+ if (!trans_id)
+ return VMBUS_RQST_ERROR;
+
+ /* Data corresponding to trans_id is stored at trans_id - 1 */
+ trans_id--;
+
+ /* Invalid trans_id */
+ if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
+ return VMBUS_RQST_ERROR;
+
+ req_addr = rqstor->req_arr[trans_id];
+ if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
+ rqstor->req_arr[trans_id] = rqstor->next_request_id;
+ rqstor->next_request_id = trans_id;
+
+ /* The already held spin lock provides atomicity */
+ bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+ }
+
+ return req_addr;
+}
+EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
+
+/*
+ * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
+ * requestor, provided the memory address stored at @trans_id equals @rqst_addr
+ * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
+ *
+ * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
+ * @trans_id is not contained in the requestor.
+ *
+ * Acquires and releases the requestor spin lock.
+ */
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr)
+{
+ unsigned long flags;
+ u64 req_addr;
+
+ lock_requestor(channel, flags);
+ req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
+ unlock_requestor(channel, flags);
+
+ return req_addr;
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr_match);
+
+/*
+ * vmbus_request_addr - Returns the memory address stored at @trans_id
+ * in @rqstor. Uses a spin lock to avoid race conditions.
+ * @channel: Pointer to the VMbus channel struct
+ * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
+ * next request id.
+ */
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
+{
+ return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
new file mode 100644
index 000000000..d95e567a1
--- /dev/null
+++ b/drivers/hv/channel_mgmt.c
@@ -0,0 +1,1619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <linux/hyperv.h>
+#include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
+
+#include "hyperv_vmbus.h"
+
+static void init_vp_index(struct vmbus_channel *channel);
+
+const struct vmbus_device vmbus_devs[] = {
+ /* IDE */
+ { .dev_type = HV_IDE,
+ HV_IDE_GUID,
+ .perf_device = true,
+ .allowed_in_isolated = false,
+ },
+
+ /* SCSI */
+ { .dev_type = HV_SCSI,
+ HV_SCSI_GUID,
+ .perf_device = true,
+ .allowed_in_isolated = true,
+ },
+
+ /* Fibre Channel */
+ { .dev_type = HV_FC,
+ HV_SYNTHFC_GUID,
+ .perf_device = true,
+ .allowed_in_isolated = false,
+ },
+
+ /* Synthetic NIC */
+ { .dev_type = HV_NIC,
+ HV_NIC_GUID,
+ .perf_device = true,
+ .allowed_in_isolated = true,
+ },
+
+ /* Network Direct */
+ { .dev_type = HV_ND,
+ HV_ND_GUID,
+ .perf_device = true,
+ .allowed_in_isolated = false,
+ },
+
+ /* PCIE */
+ { .dev_type = HV_PCIE,
+ HV_PCIE_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Synthetic Frame Buffer */
+ { .dev_type = HV_FB,
+ HV_SYNTHVID_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Synthetic Keyboard */
+ { .dev_type = HV_KBD,
+ HV_KBD_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Synthetic MOUSE */
+ { .dev_type = HV_MOUSE,
+ HV_MOUSE_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* KVP */
+ { .dev_type = HV_KVP,
+ HV_KVP_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Time Synch */
+ { .dev_type = HV_TS,
+ HV_TS_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = true,
+ },
+
+ /* Heartbeat */
+ { .dev_type = HV_HB,
+ HV_HEART_BEAT_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = true,
+ },
+
+ /* Shutdown */
+ { .dev_type = HV_SHUTDOWN,
+ HV_SHUTDOWN_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = true,
+ },
+
+ /* File copy */
+ { .dev_type = HV_FCOPY,
+ HV_FCOPY_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Backup */
+ { .dev_type = HV_BACKUP,
+ HV_VSS_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Dynamic Memory */
+ { .dev_type = HV_DM,
+ HV_DM_GUID,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+
+ /* Unknown GUID */
+ { .dev_type = HV_UNKNOWN,
+ .perf_device = false,
+ .allowed_in_isolated = false,
+ },
+};
+
+static const struct {
+ guid_t guid;
+} vmbus_unsupported_devs[] = {
+ { HV_AVMA1_GUID },
+ { HV_AVMA2_GUID },
+ { HV_RDV_GUID },
+ { HV_IMC_GUID },
+};
+
+/*
+ * The rescinded channel may be blocked waiting for a response from the host;
+ * take care of that.
+ */
+static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_msginfo *msginfo;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ channel->rescind = true;
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+
+ if (msginfo->waiting_channel == channel) {
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+static bool is_unsupported_vmbus_devs(const guid_t *guid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
+ if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
+ return true;
+ return false;
+}
+
+static u16 hv_get_dev_type(const struct vmbus_channel *channel)
+{
+ const guid_t *guid = &channel->offermsg.offer.if_type;
+ u16 i;
+
+ if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
+ return HV_UNKNOWN;
+
+ for (i = HV_IDE; i < HV_UNKNOWN; i++) {
+ if (guid_equal(guid, &vmbus_devs[i].guid))
+ return i;
+ }
+ pr_info("Unknown GUID: %pUl\n", guid);
+ return i;
+}
+
+/**
+ * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
+ * @icmsghdrp: Pointer to msg header structure
+ * @buf: Raw buffer channel data
+ * @buflen: Length of the raw buffer channel data.
+ * @fw_version: The framework versions we can support.
+ * @fw_vercnt: The size of @fw_version.
+ * @srv_version: The service versions we can support.
+ * @srv_vercnt: The size of @srv_version.
+ * @nego_fw_version: The selected framework version.
+ * @nego_srv_version: The selected service version.
+ *
+ * Note: Versions are given in decreasing order.
+ *
+ * Set up and fill in default negotiate response message.
+ * Mainly used by Hyper-V drivers.
+ */
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+ u32 buflen, const int *fw_version, int fw_vercnt,
+ const int *srv_version, int srv_vercnt,
+ int *nego_fw_version, int *nego_srv_version)
+{
+ int icframe_major, icframe_minor;
+ int icmsg_major, icmsg_minor;
+ int fw_major, fw_minor;
+ int srv_major, srv_minor;
+ int i, j;
+ bool found_match = false;
+ struct icmsg_negotiate *negop;
+
+ /* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
+ if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
+ pr_err_ratelimited("Invalid icmsg negotiate\n");
+ return false;
+ }
+
+ icmsghdrp->icmsgsize = 0x10;
+ negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
+
+ icframe_major = negop->icframe_vercnt;
+ icframe_minor = 0;
+
+ icmsg_major = negop->icmsg_vercnt;
+ icmsg_minor = 0;
+
+ /* Validate negop packet */
+ if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
+ pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
+ icframe_major, icmsg_major);
+ goto fw_error;
+ }
+
+ /*
+ * Select the framework version number we will
+ * support.
+ */
+
+ for (i = 0; i < fw_vercnt; i++) {
+ fw_major = (fw_version[i] >> 16);
+ fw_minor = (fw_version[i] & 0xFFFF);
+
+ for (j = 0; j < negop->icframe_vercnt; j++) {
+ if ((negop->icversion_data[j].major == fw_major) &&
+ (negop->icversion_data[j].minor == fw_minor)) {
+ icframe_major = negop->icversion_data[j].major;
+ icframe_minor = negop->icversion_data[j].minor;
+ found_match = true;
+ break;
+ }
+ }
+
+ if (found_match)
+ break;
+ }
+
+ if (!found_match)
+ goto fw_error;
+
+ found_match = false;
+
+ for (i = 0; i < srv_vercnt; i++) {
+ srv_major = (srv_version[i] >> 16);
+ srv_minor = (srv_version[i] & 0xFFFF);
+
+ for (j = negop->icframe_vercnt;
+ (j < negop->icframe_vercnt + negop->icmsg_vercnt);
+ j++) {
+
+ if ((negop->icversion_data[j].major == srv_major) &&
+ (negop->icversion_data[j].minor == srv_minor)) {
+
+ icmsg_major = negop->icversion_data[j].major;
+ icmsg_minor = negop->icversion_data[j].minor;
+ found_match = true;
+ break;
+ }
+ }
+
+ if (found_match)
+ break;
+ }
+
+ /*
+ * Respond with the framework and service
+ * version numbers we can support.
+ */
+
+fw_error:
+ if (!found_match) {
+ negop->icframe_vercnt = 0;
+ negop->icmsg_vercnt = 0;
+ } else {
+ negop->icframe_vercnt = 1;
+ negop->icmsg_vercnt = 1;
+ }
+
+ if (nego_fw_version)
+ *nego_fw_version = (icframe_major << 16) | icframe_minor;
+
+ if (nego_srv_version)
+ *nego_srv_version = (icmsg_major << 16) | icmsg_minor;
+
+ negop->icversion_data[0].major = icframe_major;
+ negop->icversion_data[0].minor = icframe_minor;
+ negop->icversion_data[1].major = icmsg_major;
+ negop->icversion_data[1].minor = icmsg_minor;
+ return found_match;
+}
+EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
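+
+/*
+ * Each entry of @fw_version/@srv_version packs the major version in the
+ * upper 16 bits and the minor version in the lower 16 bits: framework
+ * version 3.0 is passed as (3 << 16) | 0, and a negotiated result of
+ * 0x00030000 decodes back to 3.0.
+ */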
+
+/*
+ * alloc_channel - Allocate and initialize a vmbus channel object
+ */
+static struct vmbus_channel *alloc_channel(void)
+{
+ struct vmbus_channel *channel;
+
+ channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
+ if (!channel)
+ return NULL;
+
+ spin_lock_init(&channel->sched_lock);
+ init_completion(&channel->rescind_event);
+
+ INIT_LIST_HEAD(&channel->sc_list);
+
+ tasklet_init(&channel->callback_event,
+ vmbus_on_event, (unsigned long)channel);
+
+ hv_ringbuffer_pre_init(channel);
+
+ return channel;
+}
+
+/*
+ * free_channel - Release the resources used by the vmbus channel object
+ */
+static void free_channel(struct vmbus_channel *channel)
+{
+ tasklet_kill(&channel->callback_event);
+ vmbus_remove_channel_attr_group(channel);
+
+ kobject_put(&channel->kobj);
+}
+
+void vmbus_channel_map_relid(struct vmbus_channel *channel)
+{
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ /*
+ * The mapping of the channel's relid is visible from the CPUs that
+ * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
+ * execute:
+ *
+ * (a) In the "normal (i.e., not resuming from hibernation)" path,
+ * the full barrier in virt_store_mb() guarantees that the store
+ * is propagated to all CPUs before the add_channel_work work
+ * is queued. In turn, add_channel_work is queued before the
+ * channel's ring buffer is allocated/initialized and the
+ * OPENCHANNEL message for the channel is sent in vmbus_open().
+ * Hyper-V won't start sending the interrupts for the channel
+ * before the OPENCHANNEL message is acked. The memory barrier
+ * in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
+ * that vmbus_chan_sched() must find the channel's relid in
+ * recv_int_page before retrieving the channel pointer from the
+ * array of channels.
+ *
+ * (b) In the "resuming from hibernation" path, the virt_store_mb()
+ * guarantees that the store is propagated to all CPUs before
+ * the VMBus connection is marked as ready for the resume event
+ * (cf. check_ready_for_resume_event()). The interrupt handler
+ * of the VMBus driver and vmbus_chan_sched() can not run before
+ * vmbus_bus_resume() has completed execution (cf. resume_noirq).
+ */
+ virt_store_mb(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ channel);
+}
+
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
+{
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ WRITE_ONCE(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ NULL);
+}
+
+static void vmbus_release_relid(u32 relid)
+{
+ struct vmbus_channel_relid_released msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
+ msg.child_relid = relid;
+ msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
+ ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
+
+ trace_vmbus_release_relid(&msg, ret);
+}
+
+void hv_process_channel_removal(struct vmbus_channel *channel)
+{
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+ BUG_ON(!channel->rescind);
+
+ /*
+ * hv_process_channel_removal() could find INVALID_RELID only for
+ * hv_sock channels. See the inline comments in vmbus_onoffer().
+ */
+ WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
+ !is_hvsock_channel(channel));
+
+ /*
+ * Upon suspend, an in-use hv_sock channel is removed from the array of
+ * channels and the relid is invalidated. After hibernation, when the
+ * user-space application destroys the channel, it's unnecessary and
+ * unsafe to remove the channel from the array of channels. See also
+ * the inline comments before the call of vmbus_release_relid() below.
+ */
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ vmbus_channel_unmap_relid(channel);
+
+ if (channel->primary_channel == NULL)
+ list_del(&channel->listentry);
+ else
+ list_del(&channel->sc_list);
+
+ /*
+ * If this is a "perf" channel, updates the hv_numa_map[] masks so that
+ * init_vp_index() can (re-)use the CPU.
+ */
+ if (hv_is_perf_channel(channel))
+ hv_clear_allocated_cpu(channel->target_cpu);
+
+ /*
+ * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
+ * the relid is invalidated; after hibernation, when the user-space app
+ * destroys the channel, the relid is INVALID_RELID, and in this case
+ * it's unnecessary and unsafe to release the old relid, since the same
+ * relid can refer to a completely different channel now.
+ */
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ vmbus_release_relid(channel->offermsg.child_relid);
+
+ free_channel(channel);
+}
+
+void vmbus_free_channels(void)
+{
+ struct vmbus_channel *channel, *tmp;
+
+ list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
+ listentry) {
+ /* hv_process_channel_removal() needs this */
+ channel->rescind = true;
+
+ vmbus_device_unregister(channel->device_obj);
+ }
+}
+
+/* Note: the function can run concurrently for primary/sub channels. */
+static void vmbus_add_channel_work(struct work_struct *work)
+{
+ struct vmbus_channel *newchannel =
+ container_of(work, struct vmbus_channel, add_channel_work);
+ struct vmbus_channel *primary_channel = newchannel->primary_channel;
+ int ret;
+
+ /*
+ * This state is used to indicate a successful open
+ * so that when we do close the channel normally, we
+ * can cleanup properly.
+ */
+ newchannel->state = CHANNEL_OPEN_STATE;
+
+ if (primary_channel != NULL) {
+ /* newchannel is a sub-channel. */
+ struct hv_device *dev = primary_channel->device_obj;
+
+ if (vmbus_add_channel_kobj(dev, newchannel))
+ goto err_deq_chan;
+
+ if (primary_channel->sc_creation_callback != NULL)
+ primary_channel->sc_creation_callback(newchannel);
+
+ newchannel->probe_done = true;
+ return;
+ }
+
+ /*
+ * Start the process of binding the primary channel to the driver
+ */
+ newchannel->device_obj = vmbus_device_create(
+ &newchannel->offermsg.offer.if_type,
+ &newchannel->offermsg.offer.if_instance,
+ newchannel);
+ if (!newchannel->device_obj)
+ goto err_deq_chan;
+
+ newchannel->device_obj->device_id = newchannel->device_id;
+ /*
+ * Add the new device to the bus. This will kick off device-driver
+ * binding which eventually invokes the device driver's AddDevice()
+ * method.
+ *
+ * If vmbus_device_register() fails, the 'device_obj' is freed in
+ * vmbus_device_release() as called by device_unregister() in the
+ * error path of vmbus_device_register(). In the outside error
+ * path, there's no need to free it.
+ */
+ ret = vmbus_device_register(newchannel->device_obj);
+
+ if (ret != 0) {
+ pr_err("unable to add child device object (relid %d)\n",
+ newchannel->offermsg.child_relid);
+ goto err_deq_chan;
+ }
+
+ newchannel->probe_done = true;
+ return;
+
+err_deq_chan:
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ /*
+ * We need to set the flag, otherwise
+ * vmbus_onoffer_rescind() could block forever waiting
+ * for probe_done to become true.
+ */
+ newchannel->probe_done = true;
+
+ if (primary_channel == NULL)
+ list_del(&newchannel->listentry);
+ else
+ list_del(&newchannel->sc_list);
+
+ /* vmbus_process_offer() has mapped the channel. */
+ vmbus_channel_unmap_relid(newchannel);
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ vmbus_release_relid(newchannel->offermsg.child_relid);
+
+ free_channel(newchannel);
+}
+
+/*
+ * vmbus_process_offer - Process the offer by creating a channel/device
+ * associated with this offer
+ */
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
+{
+ struct vmbus_channel *channel;
+ struct workqueue_struct *wq;
+ bool fnew = true;
+
+ /*
+ * Synchronize vmbus_process_offer() and CPU hotplugging:
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_process_offer()] [Hot removal of the CPU]
+ *
+ * CPU_READ_LOCK CPUS_WRITE_LOCK
+ * LOAD cpu_online_mask SEARCH chn_list
+ * STORE target_cpu LOAD target_cpu
+ * INSERT chn_list STORE cpu_online_mask
+ * CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
+ *
+ * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
+ * CPU2's SEARCH from *not* seeing CPU1's INSERT
+ *
+ * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
+ * CPU2's LOAD from *not* seeing CPU1's STORE
+ */
+ cpus_read_lock();
+
+ /*
+ * Serializes the modifications of the chn_list list as well as
+ * the accesses to next_numa_node_id in init_vp_index().
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (guid_equal(&channel->offermsg.offer.if_type,
+ &newchannel->offermsg.offer.if_type) &&
+ guid_equal(&channel->offermsg.offer.if_instance,
+ &newchannel->offermsg.offer.if_instance)) {
+ fnew = false;
+ newchannel->primary_channel = channel;
+ break;
+ }
+ }
+
+ init_vp_index(newchannel);
+
+ /* Remember the channels that should be cleaned up upon suspend. */
+ if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
+ atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
+
+ /*
+ * Now that we have acquired the channel_mutex,
+ * we can release the potentially racing rescind thread.
+ */
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
+ if (fnew) {
+ list_add_tail(&newchannel->listentry,
+ &vmbus_connection.chn_list);
+ } else {
+ /*
+ * Check to see if this is a valid sub-channel.
+ */
+ if (newchannel->offermsg.offer.sub_channel_index == 0) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
+ /*
+ * Don't call free_channel(), because newchannel->kobj
+ * is not initialized yet.
+ */
+ kfree(newchannel);
+ WARN_ON_ONCE(1);
+ return;
+ }
+ /*
+ * Process the sub-channel.
+ */
+ list_add_tail(&newchannel->sc_list, &channel->sc_list);
+ }
+
+ vmbus_channel_map_relid(newchannel);
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
+
+ /*
+ * vmbus_process_offer() mustn't call channel->sc_creation_callback()
+ * directly for sub-channels, because sc_creation_callback() ->
+ * vmbus_open() may never get the host's response to the
+ * OPEN_CHANNEL message (the host may rescind a channel at any time,
+ * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
+ * may not wake up the vmbus_open() as it's blocked due to a non-zero
+ * vmbus_connection.offer_in_progress, and finally we have a deadlock.
+ *
+ * The above is also true for primary channels, if the related device
+ * drivers use sync probing mode by default.
+ *
+ * And, usually the handling of primary channels and sub-channels can
+ * depend on each other, so we should offload them to different
+ * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
+ * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
+ * rtnl_lock(), causing a deadlock: the former gets the rtnl_lock
+ * and waits for all the sub-channels to appear, but the latter
+ * can't get the rtnl_lock and this blocks the handling of
+ * sub-channels.
+ */
+ INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
+ wq = fnew ? vmbus_connection.handle_primary_chan_wq :
+ vmbus_connection.handle_sub_chan_wq;
+ queue_work(wq, &newchannel->add_channel_work);
+}
+
+/*
+ * Check whether the given CPU is already used by another channel of the
+ * same device. It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+ struct vmbus_channel *primary = chn->primary_channel;
+ struct vmbus_channel *sc;
+
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+ if (!primary)
+ return false;
+
+ if (primary->target_cpu == cpu)
+ return true;
+
+ list_for_each_entry(sc, &primary->sc_list, sc_list)
+ if (sc != chn && sc->target_cpu == cpu)
+ return true;
+
+ return false;
+}
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static int next_numa_node_id;
+
+/*
+ * We can statically distribute the incoming channel interrupt load
+ * by binding a channel to VCPU.
+ *
+ * For non-performance critical channels we assign the VMBUS_CONNECT_CPU.
+ * Performance critical channels will be distributed evenly among all
+ * the available NUMA nodes. Once the node is assigned, we will assign
+ * the CPU based on a simple round robin scheme.
+ */
+static void init_vp_index(struct vmbus_channel *channel)
+{
+ bool perf_chn = hv_is_perf_channel(channel);
+ u32 i, ncpu = num_online_cpus();
+ cpumask_var_t available_mask;
+ struct cpumask *allocated_mask;
+ const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
+ u32 target_cpu;
+ int numa_node;
+
+ if (!perf_chn ||
+ !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+ cpumask_empty(hk_mask)) {
+ /*
+ * If the channel is not a performance critical
+ * channel, bind it to VMBUS_CONNECT_CPU.
+ * In case alloc_cpumask_var() fails, bind it to
+ * VMBUS_CONNECT_CPU.
+ * If all the cpus are isolated, bind it to
+ * VMBUS_CONNECT_CPU.
+ */
+ channel->target_cpu = VMBUS_CONNECT_CPU;
+ if (perf_chn)
+ hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
+ return;
+ }
+
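+ /*
+ * Try up to ncpu + 1 candidate CPUs: prefer a CPU that is not already
+ * used by a sibling channel of the same device (see hv_cpuself_used()),
+ * but give up on that preference when the device has more channels than
+ * online CPUs or on the last iteration, so that a target_cpu is always
+ * chosen.
+ */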
+ for (i = 1; i <= ncpu + 1; i++) {
+ while (true) {
+ numa_node = next_numa_node_id++;
+ if (numa_node == nr_node_ids) {
+ next_numa_node_id = 0;
+ continue;
+ }
+ if (cpumask_empty(cpumask_of_node(numa_node)))
+ continue;
+ break;
+ }
+ allocated_mask = &hv_context.hv_numa_map[numa_node];
+
+retry:
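+ /*
+ * available_mask = CPUs of this NUMA node that have not yet been
+ * allocated to a channel, restricted to housekeeping (non-isolated)
+ * CPUs.
+ */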
+ cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+ cpumask_and(available_mask, available_mask, hk_mask);
+
+ if (cpumask_empty(available_mask)) {
+ /*
+ * We have cycled through all the CPUs in the node;
+ * reset the allocated map.
+ */
+ cpumask_clear(allocated_mask);
+ goto retry;
+ }
+
+ target_cpu = cpumask_first(available_mask);
+ cpumask_set_cpu(target_cpu, allocated_mask);
+
+ if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+ i > ncpu || !hv_cpuself_used(target_cpu, channel))
+ break;
+ }
+
+ channel->target_cpu = target_cpu;
+
+ free_cpumask_var(available_mask);
+}
+
+#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
+#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
+#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
+static void vmbus_wait_for_unload(void)
+{
+ int cpu;
+ void *page_addr;
+ struct hv_message *msg;
+ struct vmbus_channel_message_header *hdr;
+ u32 message_type, i;
+
+ /*
+ * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
+ * used for initial contact or to CPU0 depending on host version. When
+ * we're crashing on a different CPU let's hope that IRQ handler on
+ * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
+ * functional and vmbus_unload_response() will complete
+ * vmbus_connection.unload_event. If not, the last thing we can do is
+ * read message pages for all CPUs directly.
+ *
+ * Wait up to 100 seconds since an Azure host must write back any dirty
+ * data in its disk cache before the VMbus UNLOAD request will
+ * complete. This flushing has been empirically observed to take up
+ * to 50 seconds in cases with a lot of dirty data, so allow additional
+ * leeway and for inaccuracies in mdelay(). But eventually time out so
+ * that the panic path can't get hung forever in case the response
+ * message isn't seen.
+ */
+ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
+ if (completion_done(&vmbus_connection.unload_event))
+ goto completed;
+
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ /*
+ * In a CoCo VM the synic_message_page is not allocated
+ * in hv_synic_alloc(). Instead it is set/cleared in
+ * hv_synic_enable_regs() and hv_synic_disable_regs()
+ * such that it is set only when the CPU is online. If
+ * not all present CPUs are online, the message page
+ * might be NULL, so skip such CPUs.
+ */
+ page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
+ msg = (struct hv_message *)page_addr
+ + VMBUS_MESSAGE_SINT;
+
+ message_type = READ_ONCE(msg->header.message_type);
+ if (message_type == HVMSG_NONE)
+ continue;
+
+ hdr = (struct vmbus_channel_message_header *)
+ msg->u.payload;
+
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ complete(&vmbus_connection.unload_event);
+
+ vmbus_signal_eom(msg, message_type);
+ }
+
+ /*
+ * Give a notice periodically so someone watching the
+ * serial output won't think it is completely hung.
+ */
+ if (!(i % UNLOAD_MSG_LOOPS))
+ pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+ mdelay(UNLOAD_DELAY_UNIT_MS);
+ }
+ pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+
+completed:
+ /*
+ * We're crashing and have either received the UNLOAD_RESPONSE or
+ * timed out waiting for it. Clean up any maybe-pending messages on
+ * all CPUs to be able to receive new messages after we reconnect.
+ */
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+ msg->header.message_type = HVMSG_NONE;
+ }
+}
+
+/*
+ * vmbus_unload_response - Handler for the unload response.
+ */
+static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
+{
+ /*
+ * This is a global event; just wakeup the waiting thread.
+ * Once we successfully unload, we can cleanup the monitor state.
+ *
+ * NB. A malicious or compromised Hyper-V could send a spurious
+ * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
+ * of the complete() below. Make sure that unload_event has been
+ * initialized by the time this complete() is executed.
+ */
+ complete(&vmbus_connection.unload_event);
+}
+
+void vmbus_initiate_unload(bool crash)
+{
+ struct vmbus_channel_message_header hdr;
+
+ if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
+ return;
+
+ /* Pre-Win2012R2 hosts don't support reconnect */
+ if (vmbus_proto_version < VERSION_WIN8_1)
+ return;
+
+ reinit_completion(&vmbus_connection.unload_event);
+ memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
+ hdr.msgtype = CHANNELMSG_UNLOAD;
+ vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
+ !crash);
+
+ /*
+ * vmbus_initiate_unload() is also called on crash and the crash can be
+ * happening in an interrupt context, where scheduling is impossible.
+ */
+ if (!crash)
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
+}
+
+static void check_ready_for_resume_event(void)
+{
+ /*
+ * If all the old primary channels have been fixed up, then it's safe
+ * to resume.
+ */
+ if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
+ complete(&vmbus_connection.ready_for_resume_event);
+}
+
+static void vmbus_setup_channel_state(struct vmbus_channel *channel,
+ struct vmbus_channel_offer_channel *offer)
+{
+ /*
+ * Setup state for signalling the host.
+ */
+ channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
+
+ channel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ channel->sig_event = offer->connection_id;
+
+ memcpy(&channel->offermsg, offer,
+ sizeof(struct vmbus_channel_offer_channel));
+ channel->monitor_grp = (u8)offer->monitorid / 32;
+ channel->monitor_bit = (u8)offer->monitorid % 32;
+ channel->device_id = hv_get_dev_type(channel);
+}
+
+/*
+ * find_primary_channel_by_offer - Get the channel object given the new offer.
+ * This is only used in the resume path of hibernation.
+ */
+static struct vmbus_channel *
+find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
+{
+ struct vmbus_channel *channel = NULL, *iter;
+ const guid_t *inst1, *inst2;
+
+ /* Ignore sub-channel offers. */
+ if (offer->offer.sub_channel_index != 0)
+ return NULL;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
+ inst1 = &iter->offermsg.offer.if_instance;
+ inst2 = &offer->offer.if_instance;
+
+ if (guid_equal(inst1, inst2)) {
+ channel = iter;
+ break;
+ }
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return channel;
+}
+
+static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer)
+{
+ const guid_t *guid = &offer->offer.if_type;
+ u16 i;
+
+ if (!hv_is_isolation_supported())
+ return true;
+
+ if (is_hvsock_offer(offer))
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
+ if (guid_equal(guid, &vmbus_devs[i].guid))
+ return vmbus_devs[i].allowed_in_isolated;
+ }
+ return false;
+}
+
+/*
+ * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
+ *
+ */
+static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_offer_channel *offer;
+ struct vmbus_channel *oldchannel, *newchannel;
+ size_t offer_sz;
+
+ offer = (struct vmbus_channel_offer_channel *)hdr;
+
+ trace_vmbus_onoffer(offer);
+
+ if (!vmbus_is_valid_offer(offer)) {
+ pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
+ offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ return;
+ }
+
+ oldchannel = find_primary_channel_by_offer(offer);
+
+ if (oldchannel != NULL) {
+ /*
+ * We're resuming from hibernation: all the sub-channel and
+ * hv_sock channels we had before the hibernation should have
+ * been cleaned up, and now we must be seeing a re-offered
+ * primary channel that we had before the hibernation.
+ */
+
+ /*
+ * { Initially: channel relid = INVALID_RELID,
+ * channels[valid_relid] = NULL }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_device_release()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * STORE channel relid = valid_relid LOAD r1 = channel relid
+ * MAP_RELID channel if (r1 != INVALID_RELID)
+ * UNLOCK channel_mutex UNMAP_RELID channel
+ * UNLOCK channel_mutex
+ *
+ * Forbids: r1 == valid_relid &&
+ * channels[valid_relid] == channel
+ *
+ * Note. r1 can be INVALID_RELID only for an hv_sock channel.
+ * None of the hv_sock channels which were present before the
+ * suspend are re-offered upon the resume. See the WARN_ON()
+ * in hv_process_channel_removal().
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
+ WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
+ /* Fix up the relid. */
+ oldchannel->offermsg.child_relid = offer->child_relid;
+
+ offer_sz = sizeof(*offer);
+ if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
+ /*
+ * This is not an error, since the host can also change
+ * the other field(s) of the offer, e.g. on WS RS5
+ * (Build 17763), the offer->connection_id of the
+ * Mellanox VF vmbus device can change when the host
+ * reoffers the device upon resume.
+ */
+ pr_debug("vmbus offer changed: relid=%d\n",
+ offer->child_relid);
+
+ print_hex_dump_debug("Old vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ &oldchannel->offermsg, offer_sz,
+ false);
+ print_hex_dump_debug("New vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ offer, offer_sz, false);
+
+ /* Fix up the old channel. */
+ vmbus_setup_channel_state(oldchannel, offer);
+ }
+
+ /* Add the channel back to the array of channels. */
+ vmbus_channel_map_relid(oldchannel);
+ check_ready_for_resume_event();
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return;
+ }
+
+ /* Allocate the channel object and save this offer. */
+ newchannel = alloc_channel();
+ if (!newchannel) {
+ vmbus_release_relid(offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ pr_err("Unable to allocate channel object\n");
+ return;
+ }
+
+ vmbus_setup_channel_state(newchannel, offer);
+
+ vmbus_process_offer(newchannel);
+}
+
+static void check_ready_for_suspend_event(void)
+{
+ /*
+ * If all the sub-channels or hv_sock channels have been cleaned up,
+ * then it's safe to suspend.
+ */
+ if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
+ complete(&vmbus_connection.ready_for_suspend_event);
+}
+
+/*
+ * vmbus_onoffer_rescind - Rescind offer handler.
+ *
+ * We queue a work item to process this offer synchronously
+ */
+static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_rescind_offer *rescind;
+ struct vmbus_channel *channel;
+ struct device *dev;
+ bool clean_up_chan_for_suspend;
+
+ rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+ trace_vmbus_onoffer_rescind(rescind);
+
+ /*
+ * The offer msg and the corresponding rescind msg
+ * from the host are guaranteed to be ordered -
+ * offer comes in first and then the rescind.
+ * Since we process these events in work elements,
+ * and with preemption, we may end up processing
+ * the events out of order. We rely on the synchronization
+ * provided by offer_in_progress and by channel_mutex for
+ * ordering these events:
+ *
+ * { Initially: offer_in_progress = 1 }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_onoffer_rescind()]
+ *
+ * LOCK channel_mutex WAIT_ON offer_in_progress == 0
+ * DECREMENT offer_in_progress LOCK channel_mutex
+ * STORE channels[] LOAD channels[]
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
+ */
+
+ while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+ /*
+ * Wait here while any channel offer is still being
+ * processed.
+ */
+ msleep(1);
+ }
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ channel = relid2channel(rescind->child_relid);
+ if (channel != NULL) {
+ /*
+ * Guarantee that no other instance of vmbus_onoffer_rescind()
+ * has got a reference to the channel object. Synchronize on
+ * &vmbus_connection.channel_mutex.
+ */
+ if (channel->rescind_ref) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return;
+ }
+ channel->rescind_ref = true;
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ if (channel == NULL) {
+ /*
+ * We failed in processing the offer message;
+ * we would have cleaned up the relid in that
+ * failure path.
+ */
+ return;
+ }
+
+ clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
+ is_sub_channel(channel);
+ /*
+ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
+ * should make sure the channel callback is not running any more.
+ */
+ vmbus_reset_channel_cb(channel);
+
+ /*
+ * Now wait for offer handling to complete.
+ */
+ vmbus_rescind_cleanup(channel);
+ while (READ_ONCE(channel->probe_done) == false) {
+ /*
+ * Wait here until the offer handling for this channel
+ * (i.e. the probe) has completed.
+ */
+ msleep(1);
+ }
+
+ /*
+ * At this point, the rescind handling can proceed safely.
+ */
+
+ if (channel->device_obj) {
+ if (channel->chn_rescind_callback) {
+ channel->chn_rescind_callback(channel);
+
+ if (clean_up_chan_for_suspend)
+ check_ready_for_suspend_event();
+
+ return;
+ }
+ /*
+ * We will have to unregister this device from the
+ * driver core.
+ */
+ dev = get_device(&channel->device_obj->device);
+ if (dev) {
+ vmbus_device_unregister(channel->device_obj);
+ put_device(dev);
+ }
+ } else if (channel->primary_channel != NULL) {
+ /*
+ * Sub-channel is being rescinded. Following is the channel
+ * close sequence when initiated from the driver (refer to
+ * vmbus_close() for details):
+ * 1. Close all sub-channels first
+ * 2. Then close the primary channel.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+ if (channel->state == CHANNEL_OPEN_STATE) {
+ /*
+ * CHANNEL_OPEN_STATE means the channel has only been offered and
+ * not opened by a driver (vmbus_open() moves it to
+ * CHANNEL_OPENED_STATE), so it is safe to clean it up here.
+ */
+ hv_process_channel_removal(channel);
+ } else {
+ complete(&channel->rescind_event);
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ }
+
+ /* The "channel" may have been freed. Do not access it any longer. */
+
+ if (clean_up_chan_for_suspend)
+ check_ready_for_suspend_event();
+}
+
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+ BUG_ON(!is_hvsock_channel(channel));
+
+ /* We always get a rescind msg when a connection is closed. */
+ while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
+ msleep(1);
+
+ vmbus_device_unregister(channel->device_obj);
+}
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
+
+/*
+ * vmbus_onoffers_delivered -
+ * This is invoked when all offers have been delivered.
+ *
+ * Nothing to do here.
+ */
+static void vmbus_onoffers_delivered(
+ struct vmbus_channel_message_header *hdr)
+{
+}
+
+/*
+ * vmbus_onopen_result - Open result handler.
+ *
+ * This is invoked when we received a response to our channel open request.
+ * Find the matching request, copy the response and signal the requesting
+ * thread.
+ */
+static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_open_result *result;
+ struct vmbus_channel_msginfo *msginfo;
+ struct vmbus_channel_message_header *requestheader;
+ struct vmbus_channel_open_channel *openmsg;
+ unsigned long flags;
+
+ result = (struct vmbus_channel_open_result *)hdr;
+
+ trace_vmbus_onopen_result(result);
+
+ /*
+ * Find the open msg, copy the result and signal/unblock the wait event
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+ requestheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
+ openmsg =
+ (struct vmbus_channel_open_channel *)msginfo->msg;
+ if (openmsg->child_relid == result->child_relid &&
+ openmsg->openid == result->openid) {
+ memcpy(&msginfo->response.open_result,
+ result,
+ sizeof(
+ struct vmbus_channel_open_result));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
+ * vmbus_ongpadl_created - GPADL created handler.
+ *
+ * This is invoked when we received a response to our gpadl create request.
+ * Find the matching request, copy the response and signal the requesting
+ * thread.
+ */
+static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_gpadl_created *gpadlcreated;
+ struct vmbus_channel_msginfo *msginfo;
+ struct vmbus_channel_message_header *requestheader;
+ struct vmbus_channel_gpadl_header *gpadlheader;
+ unsigned long flags;
+
+ gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
+
+ trace_vmbus_ongpadl_created(gpadlcreated);
+
+ /*
+ * Find the establish msg, copy the result and signal/unblock the wait
+ * event
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+ requestheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
+ gpadlheader =
+ (struct vmbus_channel_gpadl_header *)requestheader;
+
+ if ((gpadlcreated->child_relid ==
+ gpadlheader->child_relid) &&
+ (gpadlcreated->gpadl == gpadlheader->gpadl)) {
+ memcpy(&msginfo->response.gpadl_created,
+ gpadlcreated,
+ sizeof(
+ struct vmbus_channel_gpadl_created));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
+ * vmbus_onmodifychannel_response - Modify Channel response handler.
+ *
+ * This is invoked when we received a response to our channel modify request.
+ * Find the matching request, copy the response and signal the requesting thread.
+ */
+static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_modifychannel_response *response;
+ struct vmbus_channel_msginfo *msginfo;
+ unsigned long flags;
+
+ response = (struct vmbus_channel_modifychannel_response *)hdr;
+
+ trace_vmbus_onmodifychannel_response(response);
+
+ /*
+ * Find the modify msg, copy the response and signal/unblock the wait event.
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
+ struct vmbus_channel_message_header *responseheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
+ struct vmbus_channel_modifychannel *modifymsg;
+
+ modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
+ if (modifymsg->child_relid == response->child_relid) {
+ memcpy(&msginfo->response.modify_response, response,
+ sizeof(*response));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
+ * vmbus_ongpadl_torndown - GPADL torndown handler.
+ *
+ * This is invoked when we received a response to our gpadl teardown request.
+ * Find the matching request, copy the response and signal the requesting
+ * thread.
+ */
+static void vmbus_ongpadl_torndown(
+ struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_gpadl_torndown *gpadl_torndown;
+ struct vmbus_channel_msginfo *msginfo;
+ struct vmbus_channel_message_header *requestheader;
+ struct vmbus_channel_gpadl_teardown *gpadl_teardown;
+ unsigned long flags;
+
+ gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
+
+ trace_vmbus_ongpadl_torndown(gpadl_torndown);
+
+ /*
+ * Find the teardown msg, copy the result and signal/unblock the wait event
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+ requestheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
+ gpadl_teardown =
+ (struct vmbus_channel_gpadl_teardown *)requestheader;
+
+ if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
+ memcpy(&msginfo->response.gpadl_torndown,
+ gpadl_torndown,
+ sizeof(
+ struct vmbus_channel_gpadl_torndown));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
+ * vmbus_onversion_response - Version response handler
+ *
+ * This is invoked when we received a response to our initiate contact request.
+ * Find the matching request, copy the response and signal the requesting
+ * thread.
+ */
+static void vmbus_onversion_response(
+ struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_msginfo *msginfo;
+ struct vmbus_channel_message_header *requestheader;
+ struct vmbus_channel_version_response *version_response;
+ unsigned long flags;
+
+ version_response = (struct vmbus_channel_version_response *)hdr;
+
+ trace_vmbus_onversion_response(version_response);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+ requestheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (requestheader->msgtype ==
+ CHANNELMSG_INITIATE_CONTACT) {
+ memcpy(&msginfo->response.version_response,
+ version_response,
+ sizeof(struct vmbus_channel_version_response));
+ complete(&msginfo->waitevent);
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
+ * Channel message dispatch table: each entry lists the message type, a flag
+ * consumed by vmbus_on_msg_dpc() to decide whether the handler may block (and
+ * so must run from a work queue) or can be invoked directly, the handler
+ * itself, and the minimum payload size expected for that message.
+ */
+const struct vmbus_channel_message_table_entry
+channel_message_table[CHANNELMSG_COUNT] = {
+ { CHANNELMSG_INVALID, 0, NULL, 0},
+ { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
+ sizeof(struct vmbus_channel_offer_channel)},
+ { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
+ sizeof(struct vmbus_channel_rescind_offer) },
+ { CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
+ { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
+ { CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
+ sizeof(struct vmbus_channel_open_result)},
+ { CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
+ { CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
+ { CHANNELMSG_GPADL_BODY, 0, NULL, 0},
+ { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
+ sizeof(struct vmbus_channel_gpadl_created)},
+ { CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
+ { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
+ sizeof(struct vmbus_channel_gpadl_torndown) },
+ { CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
+ { CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
+ { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
+ sizeof(struct vmbus_channel_version_response)},
+ { CHANNELMSG_UNLOAD, 0, NULL, 0},
+ { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
+ { CHANNELMSG_18, 0, NULL, 0},
+ { CHANNELMSG_19, 0, NULL, 0},
+ { CHANNELMSG_20, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL_RESPONSE, 1, vmbus_onmodifychannel_response,
+ sizeof(struct vmbus_channel_modifychannel_response)},
+};
+
+/*
+ * vmbus_onmessage - Handler for channel protocol messages.
+ *
+ * This is invoked in the vmbus worker thread context.
+ */
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
+{
+ trace_vmbus_on_message(hdr);
+
+ /*
+ * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
+ * out of bounds and the message_handler pointer cannot be NULL.
+ */
+ channel_message_table[hdr->msgtype].message_handler(hdr);
+}
+
+/*
+ * vmbus_request_offers - Send a request to get all our pending offers.
+ */
+int vmbus_request_offers(void)
+{
+ struct vmbus_channel_message_header *msg;
+ struct vmbus_channel_msginfo *msginfo;
+ int ret;
+
+ msginfo = kzalloc(sizeof(*msginfo) +
+ sizeof(struct vmbus_channel_message_header),
+ GFP_KERNEL);
+ if (!msginfo)
+ return -ENOMEM;
+
+ msg = (struct vmbus_channel_message_header *)msginfo->msg;
+
+ msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
+ true);
+
+ trace_vmbus_request_offers(ret);
+
+ if (ret != 0) {
+ pr_err("Unable to request offers - %d\n", ret);
+
+ goto cleanup;
+ }
+
+cleanup:
+ kfree(msginfo);
+
+ return ret;
+}
+
+void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
+ void (*sc_cr_cb)(struct vmbus_channel *new_sc))
+{
+ primary_channel->sc_creation_callback = sc_cr_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+ void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+ channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
new file mode 100644
index 000000000..da51b5078
--- /dev/null
+++ b/drivers/hv/connection.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/hyperv.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/set_memory.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv_vmbus.h"
+
+
+struct vmbus_connection vmbus_connection = {
+ .conn_state = DISCONNECTED,
+ .unload_event = COMPLETION_INITIALIZER(
+ vmbus_connection.unload_event),
+ .next_gpadl_handle = ATOMIC_INIT(0xE1E10),
+
+ .ready_for_suspend_event = COMPLETION_INITIALIZER(
+ vmbus_connection.ready_for_suspend_event),
+ .ready_for_resume_event = COMPLETION_INITIALIZER(
+ vmbus_connection.ready_for_resume_event),
+};
+EXPORT_SYMBOL_GPL(vmbus_connection);
+
+/*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+/*
+ * Table of VMBus versions listed from newest to oldest.
+ * VERSION_WIN7 and VERSION_WS2008 are no longer supported in
+ * Linux guests and are not listed.
+ */
+static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V5_3,
+ VERSION_WIN10_V5_2,
+ VERSION_WIN10_V5_1,
+ VERSION_WIN10_V5,
+ VERSION_WIN10_V4_1,
+ VERSION_WIN10,
+ VERSION_WIN8_1,
+ VERSION_WIN8
+};
+
+/*
+ * Maximum VMBus protocol version a guest can negotiate. Useful to cap the
+ * VMBus version for testing and debugging purposes.
+ */
+static uint max_version = VERSION_WIN10_V5_3;
+
+module_param(max_version, uint, S_IRUGO);
+MODULE_PARM_DESC(max_version,
+ "Maximal VMBus protocol version which can be negotiated");
+
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
+{
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+
+ init_completion(&msginfo->waitevent);
+
+ msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+ memset(msg, 0, sizeof(*msg));
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = version;
+
+ /*
+ * VMBus protocol 5.0 (VERSION_WIN10_V5) and higher require that we
+ * use VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * and for subsequent messages, we must use the Message Connection ID
+ * field in the host-returned Version Response Message. And, with
+ * VERSION_WIN10_V5 and higher, we don't use msg->interrupt_page, but we
+ * tell the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
+ * compatibility.
+ *
+ * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
+ */
+ if (version >= VERSION_WIN10_V5) {
+ msg->msg_sint = VMBUS_MESSAGE_SINT;
+ vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
+ } else {
+ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+ vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
+ }
+
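+ /*
+ * Hand the host the guest physical addresses of the two monitor pages
+ * and ask it to target its messages at the VMBus connect CPU.
+ */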
+ msg->monitor_page1 = vmbus_connection.monitor_pages_pa[0];
+ msg->monitor_page2 = vmbus_connection.monitor_pages_pa[1];
+
+ msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
+
+ /*
+ * Add to list before we send the request since we may
+ * receive the response before returning from this routine
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_initiate_contact),
+ true);
+
+ trace_vmbus_negotiate_version(msg, ret);
+
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return ret;
+ }
+
+ /* Wait for the connection response */
+ wait_for_completion(&msginfo->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ /* Check if successful */
+ if (msginfo->response.version_response.version_supported) {
+ vmbus_connection.conn_state = CONNECTED;
+
+ if (version >= VERSION_WIN10_V5)
+ vmbus_connection.msg_conn_id =
+ msginfo->response.version_response.msg_conn_id;
+ } else {
+ return -ECONNREFUSED;
+ }
+
+ return ret;
+}
+
+/*
+ * vmbus_connect - Sends a connect request on the partition service connection
+ */
+int vmbus_connect(void)
+{
+ struct vmbus_channel_msginfo *msginfo = NULL;
+ int i, ret = 0;
+ __u32 version;
+
+ /* Initialize the vmbus connection */
+ vmbus_connection.conn_state = CONNECTING;
+ vmbus_connection.work_queue = create_workqueue("hv_vmbus_con");
+ if (!vmbus_connection.work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.rescind_work_queue =
+ create_workqueue("hv_vmbus_rescind");
+ if (!vmbus_connection.rescind_work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vmbus_connection.ignore_any_offer_msg = false;
+
+ vmbus_connection.handle_primary_chan_wq =
+ create_workqueue("hv_pri_chan");
+ if (!vmbus_connection.handle_primary_chan_wq) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.handle_sub_chan_wq =
+ create_workqueue("hv_sub_chan");
+ if (!vmbus_connection.handle_sub_chan_wq) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
+ spin_lock_init(&vmbus_connection.channelmsg_lock);
+
+ INIT_LIST_HEAD(&vmbus_connection.chn_list);
+ mutex_init(&vmbus_connection.channel_mutex);
+
+ /*
+ * Setup the vmbus event connection for channel interrupt
+ * abstraction stuff
+ */
+ vmbus_connection.int_page =
+ (void *)hv_alloc_hyperv_zeroed_page();
+ if (vmbus_connection.int_page == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.recv_int_page = vmbus_connection.int_page;
+ vmbus_connection.send_int_page =
+ (void *)((unsigned long)vmbus_connection.int_page +
+ (HV_HYP_PAGE_SIZE >> 1));
+
+ /*
+ * Setup the monitor notification facility. The 1st page for
+ * parent->child and the 2nd page for child->parent
+ */
+ vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_zeroed_page();
+ if ((vmbus_connection.monitor_pages[0] == NULL) ||
+ (vmbus_connection.monitor_pages[1] == NULL)) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.monitor_pages_original[0]
+ = vmbus_connection.monitor_pages[0];
+ vmbus_connection.monitor_pages_original[1]
+ = vmbus_connection.monitor_pages[1];
+ vmbus_connection.monitor_pages_pa[0]
+ = virt_to_phys(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages_pa[1]
+ = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+ if (hv_is_isolation_supported()) {
+ ret = set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[0],
+ 1);
+ ret |= set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[1],
+ 1);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * An isolation VM with AMD SNP needs to access the monitor pages
+ * via the address space above the shared GPA boundary.
+ */
+ if (hv_isolation_type_snp()) {
+ vmbus_connection.monitor_pages_pa[0] +=
+ ms_hyperv.shared_gpa_boundary;
+ vmbus_connection.monitor_pages_pa[1] +=
+ ms_hyperv.shared_gpa_boundary;
+
+ vmbus_connection.monitor_pages[0]
+ = memremap(vmbus_connection.monitor_pages_pa[0],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[0]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.monitor_pages[1]
+ = memremap(vmbus_connection.monitor_pages_pa[1],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[1]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * The hvcall that sets memory host visibility smears the memory
+ * contents, so zero the monitor pages here.
+ */
+ memset(vmbus_connection.monitor_pages[0], 0x00,
+ HV_HYP_PAGE_SIZE);
+ memset(vmbus_connection.monitor_pages[1], 0x00,
+ HV_HYP_PAGE_SIZE);
+
+ }
+
+ msginfo = kzalloc(sizeof(*msginfo) +
+ sizeof(struct vmbus_channel_initiate_contact),
+ GFP_KERNEL);
+ if (msginfo == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /*
+ * Negotiate a compatible VMBUS version number with the
+ * host. We start with the highest number we can support
+ * and work our way down until we negotiate a compatible
+ * version.
+ */
+
+ for (i = 0; ; i++) {
+ if (i == ARRAY_SIZE(vmbus_versions)) {
+ ret = -EDOM;
+ goto cleanup;
+ }
+
+ version = vmbus_versions[i];
+ if (version > max_version)
+ continue;
+
+ ret = vmbus_negotiate_version(msginfo, version);
+ if (ret == -ETIMEDOUT)
+ goto cleanup;
+
+ if (vmbus_connection.conn_state == CONNECTED)
+ break;
+ }
+
+ if (hv_is_isolation_supported() && version < VERSION_WIN10_V5_2) {
+ pr_err("Invalid VMBus version %d.%d (expected >= %d.%d) from the host supporting isolation\n",
+ version >> 16, version & 0xFFFF, VERSION_WIN10_V5_2 >> 16, VERSION_WIN10_V5_2 & 0xFFFF);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ vmbus_proto_version = version;
+ pr_info("Vmbus version:%d.%d\n",
+ version >> 16, version & 0xFFFF);
+
+ vmbus_connection.channels = kcalloc(MAX_CHANNEL_RELIDS,
+ sizeof(struct vmbus_channel *),
+ GFP_KERNEL);
+ if (vmbus_connection.channels == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ kfree(msginfo);
+ return 0;
+
+cleanup:
+ pr_err("Unable to connect to host\n");
+
+ vmbus_connection.conn_state = DISCONNECTED;
+ vmbus_disconnect();
+
+ kfree(msginfo);
+
+ return ret;
+}
+
+void vmbus_disconnect(void)
+{
+ /*
+ * First send the unload request to the host.
+ */
+ vmbus_initiate_unload(false);
+
+ if (vmbus_connection.handle_sub_chan_wq)
+ destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
+
+ if (vmbus_connection.handle_primary_chan_wq)
+ destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+
+ if (vmbus_connection.rescind_work_queue)
+ destroy_workqueue(vmbus_connection.rescind_work_queue);
+
+ if (vmbus_connection.work_queue)
+ destroy_workqueue(vmbus_connection.work_queue);
+
+ if (vmbus_connection.int_page) {
+ hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
+ vmbus_connection.int_page = NULL;
+ }
+
+ if (hv_is_isolation_supported()) {
+ /*
+ * memunmap() internally checks whether the address came from
+ * memremap()/ioremap(). In a non-SNP CVM it unmaps nothing, so
+ * there is no need to check the CVM type here.
+ */
+ memunmap(vmbus_connection.monitor_pages[0]);
+ memunmap(vmbus_connection.monitor_pages[1]);
+
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[0],
+ 1);
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[1],
+ 1);
+ }
+
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[0]);
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[1]);
+ vmbus_connection.monitor_pages_original[0] =
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages_original[1] =
+ vmbus_connection.monitor_pages[1] = NULL;
+}
+
+/*
+ * relid2channel - Get the channel object given its
+ * child relative id (i.e. channel id)
+ */
+struct vmbus_channel *relid2channel(u32 relid)
+{
+ if (vmbus_connection.channels == NULL) {
+ pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
+ return NULL;
+ }
+ if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+ return NULL;
+ return READ_ONCE(vmbus_connection.channels[relid]);
+}
+
+/*
+ * vmbus_on_event - Process a channel event notification
+ *
+ * For batched channels (default) optimize host to guest signaling
+ * by ensuring:
+ * 1. While reading the channel, we disable interrupts from host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ * state is set we check to see if additional packets are
+ * available to read. In this case we repeat the process.
+ * If this tasklet has been running for a long time
+ * then reschedule ourselves.
+ */
+void vmbus_on_event(unsigned long data)
+{
+ struct vmbus_channel *channel = (void *) data;
+ void (*callback_fn)(void *context);
+
+ trace_vmbus_on_event(channel);
+
+ hv_debug_delay_test(channel, INTERRUPT_DELAY);
+
+ /* A channel once created is persistent even when
+ * there is no driver handling the device. An
+ * unloading driver sets the onchannel_callback to NULL.
+ */
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(!callback_fn))
+ return;
+
+ (*callback_fn)(channel->channel_callback_context);
+
+ if (channel->callback_mode != HV_CALL_BATCHED)
+ return;
+
+ if (likely(hv_end_read(&channel->inbound) == 0))
+ return;
+
+ hv_begin_read(&channel->inbound);
+ tasklet_schedule(&channel->callback_event);
+}
+
+/*
+ * vmbus_post_msg - Send a msg on the vmbus's message connection
+ */
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
+{
+ struct vmbus_channel_message_header *hdr;
+ union hv_connection_id conn_id;
+ int ret = 0;
+ int retries = 0;
+ u32 usec = 1;
+
+ conn_id.asu32 = 0;
+ conn_id.u.id = vmbus_connection.msg_conn_id;
+
+ /*
+ * hv_post_message() can have transient failures because of
+ * insufficient resources. Retry the operation up to 100 times,
+ * with exponential backoff, before giving up.
+ */
+ while (retries < 100) {
+ ret = hv_post_message(conn_id, 1, buffer, buflen);
+
+ switch (ret) {
+ case HV_STATUS_INVALID_CONNECTION_ID:
+ /*
+ * See vmbus_negotiate_version(): VMBus protocol 5.0
+ * and higher require that we use
+ * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate
+ * Contact message, but on old hosts that only
+ * support VMBus protocol 4.0 or lower, here we get
+ * HV_STATUS_INVALID_CONNECTION_ID and we should
+ * return an error immediately without retrying.
+ */
+ hdr = buffer;
+ if (hdr->msgtype == CHANNELMSG_INITIATE_CONTACT)
+ return -EINVAL;
+ /*
+ * We could get this if we send messages too
+ * frequently.
+ */
+ ret = -EAGAIN;
+ break;
+ case HV_STATUS_INSUFFICIENT_MEMORY:
+ case HV_STATUS_INSUFFICIENT_BUFFERS:
+ ret = -ENOBUFS;
+ break;
+ case HV_STATUS_SUCCESS:
+ return ret;
+ default:
+ pr_err("hv_post_message() failed; error code:%d\n", ret);
+ return -EINVAL;
+ }
+
+ retries++;
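+ /*
+ * Back off before the next attempt: sleep if the caller allows it
+ * and the delay exceeds 1 ms, otherwise busy-wait. The delay
+ * doubles on each retry and stops growing after 22 retries,
+ * capping it at roughly two seconds per iteration.
+ */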
+ if (can_sleep && usec > 1000)
+ msleep(usec / 1000);
+ else if (usec < MAX_UDELAY_MS * 1000)
+ udelay(usec);
+ else
+ mdelay(usec / 1000);
+
+ if (retries < 22)
+ usec *= 2;
+ }
+ return ret;
+}
+
+/*
+ * vmbus_set_event - Send an event notification to the parent
+ */
+void vmbus_set_event(struct vmbus_channel *channel)
+{
+ u32 child_relid = channel->offermsg.child_relid;
+
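+ /*
+ * For a channel without a dedicated interrupt, first set the channel's
+ * bit in the shared send-interrupt page so the host knows which relid
+ * is being signalled, then issue the SIGNAL_EVENT hypercall below.
+ */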
+ if (!channel->is_dedicated_interrupt)
+ vmbus_send_interrupt(child_relid);
+
+ ++channel->sig_events;
+
+ if (hv_isolation_type_snp())
+ hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+ NULL, sizeof(channel->sig_event));
+ else
+ hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+}
+EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
new file mode 100644
index 000000000..4d6480d57
--- /dev/null
+++ b/drivers/hv/hv.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/hyperv.h>
+#include <linux/random.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <clocksource/hyperv_timer.h>
+#include <asm/mshyperv.h>
+#include "hyperv_vmbus.h"
+
+/* The one and only */
+struct hv_context hv_context;
+
+/*
+ * hv_init - Main initialization routine.
+ *
+ * This routine must be called before any other routines in here are called
+ */
+int hv_init(void)
+{
+ hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
+ if (!hv_context.cpu_context)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Functions for allocating and freeing memory with size and
+ * alignment HV_HYP_PAGE_SIZE. These functions are needed because
+ * the guest page size may not be the same as the Hyper-V page
+ * size. We depend upon kmalloc() aligning power-of-two size
+ * allocations to the allocation size boundary, so that the
+ * allocated memory appears to Hyper-V as a page of the size
+ * it expects.
+ */
+
+void *hv_alloc_hyperv_page(void)
+{
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL);
+ else
+ return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void *hv_alloc_hyperv_zeroed_page(void)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ else
+ return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void hv_free_hyperv_page(unsigned long addr)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ free_page(addr);
+ else
+ kfree((void *)addr);
+}
+
+/*
+ * hv_post_message - Post a message using the hypervisor message IPC.
+ *
+ * This involves a hypercall.
+ */
+int hv_post_message(union hv_connection_id connection_id,
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
+{
+ struct hv_input_post_message *aligned_msg;
+ struct hv_per_cpu_context *hv_cpu;
+ u64 status;
+
+ if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
+ return -EMSGSIZE;
+
+ hv_cpu = get_cpu_ptr(hv_context.cpu_context);
+ aligned_msg = hv_cpu->post_msg_page;
+ aligned_msg->connectionid = connection_id;
+ aligned_msg->reserved = 0;
+ aligned_msg->message_type = message_type;
+ aligned_msg->payload_size = payload_size;
+ memcpy((void *)aligned_msg->payload, payload, payload_size);
+
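+ /*
+ * In an SNP isolation VM the message is posted through the GHCB page,
+ * since the hypervisor cannot read hypercall arguments that live in
+ * encrypted guest memory.
+ */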
+ if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ (void *)aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = hv_do_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL);
+
+ /* Preemption must remain disabled until after the hypercall
+ * so some other thread can't get scheduled onto this cpu and
+ * corrupt the per-cpu post_msg_page
+ */
+ put_cpu_ptr(hv_cpu);
+
+ return hv_result(status);
+}
+
+int hv_synic_alloc(void)
+{
+ int cpu;
+ struct hv_per_cpu_context *hv_cpu;
+
+ /*
+ * First, zero all per-cpu memory areas so hv_synic_free() can
+ * detect what memory has been allocated and cleanup properly
+ * after any failures.
+ */
+ for_each_present_cpu(cpu) {
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+ memset(hv_cpu, 0, sizeof(*hv_cpu));
+ }
+
+ hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
+ GFP_KERNEL);
+ if (hv_context.hv_numa_map == NULL) {
+ pr_err("Unable to allocate NUMA map\n");
+ goto err;
+ }
+
+ for_each_present_cpu(cpu) {
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ tasklet_init(&hv_cpu->msg_dpc,
+ vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+
+ /*
+ * The SynIC message and event pages are allocated by the paravisor.
+ * Skip allocating these pages here.
+ */
+ if (!hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_message_page == NULL) {
+ pr_err("Unable to allocate SYNIC message page\n");
+ goto err;
+ }
+
+ hv_cpu->synic_event_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_event_page == NULL) {
+ pr_err("Unable to allocate SYNIC event page\n");
+ goto err;
+ }
+ }
+
+ hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->post_msg_page == NULL) {
+ pr_err("Unable to allocate post msg page\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
+ return -ENOMEM;
+}
+
+
+void hv_synic_free(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
+ }
+
+ kfree(hv_context.hv_numa_map);
+}
+
+/*
+ * hv_synic_init - Initialize the Synthetic Interrupt Controller.
+ *
+ * If it is already initialized by another entity (i.e. the x2v shim), we need to
+ * retrieve the initialized message and event pages. Otherwise, we create and
+ * initialize the message and event pages.
+ */
+void hv_synic_enable_regs(unsigned int cpu)
+{
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sint shared_sint;
+ union hv_synic_scontrol sctrl;
+
+ /* Setup the Synic's message page */
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.simp_enabled = 1;
+
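+ /*
+ * In an SNP isolation VM the SynIC message page is provided by the
+ * paravisor: map the GPA already programmed in the SIMP register
+ * instead of pointing SIMP at a guest-allocated page.
+ */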
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page
+ = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+ if (!hv_cpu->synic_message_page)
+ pr_err("Failed to map SynIC message page.\n");
+ } else {
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
+
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event page */
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.siefp_enabled = 1;
+
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_event_page =
+ memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+
+ if (!hv_cpu->synic_event_page)
+ pr_err("Failed to map SynIC event page.\n");
+ } else {
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
+
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+
+ /* Setup the shared SINT. */
+ if (vmbus_irq != -1)
+ enable_percpu_irq(vmbus_irq, 0);
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
+
+ shared_sint.vector = vmbus_interrupt;
+ shared_sint.masked = false;
+
+ /*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+ shared_sint.auto_eoi =
+ !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+ shared_sint.auto_eoi = 0;
+#endif
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
+
+ /* Enable the global synic bit */
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.enable = 1;
+
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+}
+
+int hv_synic_init(unsigned int cpu)
+{
+ hv_synic_enable_regs(cpu);
+
+ hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
+
+ return 0;
+}
+
+/*
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
+ */
+void hv_synic_disable_regs(unsigned int cpu)
+{
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+ union hv_synic_sint shared_sint;
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_scontrol sctrl;
+
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
+
+ shared_sint.masked = 1;
+
+ /* Need to correctly cleanup in the case of SMP!!! */
+ /* Disable the interrupt */
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
+
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ /*
+ * In an isolation VM the SIMP and SIEFP pages are allocated by
+ * the paravisor and will also be used by the kdump kernel, so
+ * just reset the enable bit here and keep the page addresses.
+ */
+ simp.simp_enabled = 0;
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_message_page);
+ else
+ simp.base_simp_gpa = 0;
+
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.siefp_enabled = 0;
+
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_event_page);
+ else
+ siefp.base_siefp_gpa = 0;
+
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+
+ /* Disable the global synic bit */
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.enable = 0;
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+ if (vmbus_irq != -1)
+ disable_percpu_irq(vmbus_irq);
+}
+
+#define HV_MAX_TRIES 3
+/*
+ * Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one
+ * bit set, then wait for a few milliseconds. Repeat these steps for a maximum of 3 times.
+ * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
+ *
+ * If a bit is set, that means there is a pending channel interrupt. The expectation is
+ * that the normal interrupt handling mechanism will find and process the channel interrupt
+ * "very soon", and in the process clear the bit.
+ */
+static bool hv_synic_event_pending(void)
+{
+ struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ union hv_synic_event_flags *event =
+ (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+ unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+ bool pending;
+ u32 relid;
+ int tries = 0;
+
+retry:
+ pending = false;
+ for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
+ /* Special case - VMBus channel protocol messages */
+ if (relid == 0)
+ continue;
+ pending = true;
+ break;
+ }
+ if (pending && tries++ < HV_MAX_TRIES) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ return pending;
+}
+
+int hv_synic_cleanup(unsigned int cpu)
+{
+ struct vmbus_channel *channel, *sc;
+ bool channel_found = false;
+
+ if (vmbus_connection.conn_state != CONNECTED)
+ goto always_cleanup;
+
+ /*
+ * Hyper-V does not provide a way to change the connect CPU once
+ * it is set; we must prevent the connect CPU from going offline
+ * while the VM is running normally. But in the panic or kexec()
+ * path where the vmbus is already disconnected, the CPU must be
+ * allowed to shut down.
+ */
+ if (cpu == VMBUS_CONNECT_CPU)
+ return -EBUSY;
+
+ /*
+ * Search for channels which are bound to the CPU we're about to
+ * cleanup. In case we find one and vmbus is still connected, we
+ * fail; this will effectively prevent CPU offlining.
+ *
+ * TODO: Re-bind the channels to different CPUs.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (channel->target_cpu == cpu) {
+ channel_found = true;
+ break;
+ }
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ if (sc->target_cpu == cpu) {
+ channel_found = true;
+ break;
+ }
+ }
+ if (channel_found)
+ break;
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ if (channel_found)
+ return -EBUSY;
+
+ /*
+ * channel_found == false means that any channels that were previously
+ * assigned to the CPU have been reassigned elsewhere with a call to
+ * vmbus_send_modifychannel(). Scan the event flags page looking for
+ * bits that are set and wait with a timeout for vmbus_chan_sched()
+ * to process such bits. If bits are still set after this operation
+ * and VMBus is connected, fail the CPU offlining operation.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
+ return -EBUSY;
+
+always_cleanup:
+ hv_stimer_legacy_cleanup(cpu);
+
+ hv_synic_disable_regs(cpu);
+
+ return 0;
+}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
new file mode 100644
index 000000000..f98c84909
--- /dev/null
+++ b/drivers/hv/hv_balloon.c
@@ -0,0 +1,2122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012, Microsoft Corporation.
+ *
+ * Author:
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/mman.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/count_zeros.h>
+#include <linux/memory_hotplug.h>
+#include <linux/memory.h>
+#include <linux/notifier.h>
+#include <linux/percpu_counter.h>
+#include <linux/page_reporting.h>
+
+#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include <asm/mshyperv.h>
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace_balloon.h"
+
+/*
+ * We begin with definitions supporting the Dynamic Memory protocol
+ * with the host.
+ *
+ * Begin protocol definitions.
+ */
+
+
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the major
+ * version.
+ *
+ * History:
+ * Initial version 1.0
+ * Changed to 0.1 on 2009/03/25
+ * Changed to 0.2 on 2009/05/14
+ * Changed to 0.3 on 2009/12/03
+ * Changed to 1.0 on 2011/04/05
+ */
+
+#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
+#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
+#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
+
+enum {
+ DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
+ DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+ DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
+
+ DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
+ DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+ DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
+
+ DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
+};
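+
+/*
+ * Worked example of the encoding above (illustrative only):
+ * DYNMEM_PROTOCOL_VERSION_3 is DYNMEM_MAKE_VERSION(2, 0) == 0x00020000,
+ * so DYNMEM_MAJOR_VERSION() yields 2 and DYNMEM_MINOR_VERSION() yields 0.
+ */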
+
+
+
+/*
+ * Message Types
+ */
+
+enum dm_message_type {
+ /*
+ * Version 0.3
+ */
+ DM_ERROR = 0,
+ DM_VERSION_REQUEST = 1,
+ DM_VERSION_RESPONSE = 2,
+ DM_CAPABILITIES_REPORT = 3,
+ DM_CAPABILITIES_RESPONSE = 4,
+ DM_STATUS_REPORT = 5,
+ DM_BALLOON_REQUEST = 6,
+ DM_BALLOON_RESPONSE = 7,
+ DM_UNBALLOON_REQUEST = 8,
+ DM_UNBALLOON_RESPONSE = 9,
+ DM_MEM_HOT_ADD_REQUEST = 10,
+ DM_MEM_HOT_ADD_RESPONSE = 11,
+ DM_VERSION_03_MAX = 11,
+ /*
+ * Version 1.0.
+ */
+ DM_INFO_MESSAGE = 12,
+ DM_VERSION_1_MAX = 12
+};
+
+
+/*
+ * Structures defining the dynamic memory management
+ * protocol.
+ */
+
+union dm_version {
+ struct {
+ __u16 minor_version;
+ __u16 major_version;
+ };
+ __u32 version;
+} __packed;
+
+
+union dm_caps {
+ struct {
+ __u64 balloon:1;
+ __u64 hot_add:1;
+ /*
+ * To support guests that may have alignment
+ * limitations on hot-add, the guest can specify
+ * its alignment requirements; a value of n
+ * represents an alignment of 2^n in megabytes.
+ */
+ __u64 hot_add_alignment:4;
+ __u64 reservedz:58;
+ } cap_bits;
+ __u64 caps;
+} __packed;
+
+union dm_mem_page_range {
+ struct {
+ /*
+ * The PFN of the first page in the range.
+ * 40 bits is the architectural limit of a
+ * PFN on AMD64.
+ */
+ __u64 start_page:40;
+ /*
+ * The number of pages in the range.
+ */
+ __u64 page_cnt:24;
+ } finfo;
+ __u64 page_range;
+} __packed;
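+
+/*
+ * Illustration of the packing above (not part of the protocol): a range
+ * starting at PFN 0x100000 with 512 pages has finfo.start_page == 0x100000
+ * and finfo.page_cnt == 512; with the bitfield layout used on the
+ * architectures this driver supports, page_range == (512ULL << 40) | 0x100000.
+ */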
+
+
+
+/*
+ * The header for all dynamic memory messages:
+ *
+ * type: Type of the message.
+ * size: Size of the message in bytes, including the header.
+ * trans_id: The guest is responsible for manufacturing this ID.
+ */
+
+struct dm_header {
+ __u16 type;
+ __u16 size;
+ __u32 trans_id;
+} __packed;
+
+/*
+ * A generic message format for dynamic memory.
+ * Specific message formats are defined later in the file.
+ */
+
+struct dm_message {
+ struct dm_header hdr;
+ __u8 data[]; /* enclosed message */
+} __packed;
+
+
+/*
+ * Specific message types supporting the dynamic memory protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * dm_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_request {
+ struct dm_header hdr;
+ union dm_version version;
+ __u32 is_last_attempt:1;
+ __u32 reservedz:31;
+} __packed;
+
+/*
+ * Version response message; sent from the host to the guest,
+ * indicating whether the host has accepted the version sent by the guest.
+ *
+ * is_accepted: If TRUE, host has accepted the version and the guest
+ * should proceed to the next stage of the protocol. FALSE indicates that
+ * guest should re-try with a different version.
+ *
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_response {
+ struct dm_header hdr;
+ __u64 is_accepted:1;
+ __u64 reservedz:63;
+} __packed;
+
+/*
+ * Message reporting capabilities. This is sent from the guest to the
+ * host.
+ */
+
+struct dm_capabilities {
+ struct dm_header hdr;
+ union dm_caps caps;
+ __u64 min_page_cnt;
+ __u64 max_page_number;
+} __packed;
+
+/*
+ * Response to the capabilities message. This is sent from the host to the
+ * guest. This message notifies if the host has accepted the guest's
+ * capabilities. If the host has not accepted, the guest must shut down
+ * the service.
+ *
+ * is_accepted: Indicates if the host has accepted guest's capabilities.
+ * reservedz: Must be 0.
+ */
+
+struct dm_capabilities_resp_msg {
+ struct dm_header hdr;
+ __u64 is_accepted:1;
+ __u64 reservedz:63;
+} __packed;
+
+/*
+ * This message is used to report memory pressure from the guest.
+ * This message is not part of any transaction and there is no
+ * response to this message.
+ *
+ * num_avail: Available memory in pages.
+ * num_committed: Committed memory in pages.
+ * page_file_size: The accumulated size of all page files
+ * in the system in pages.
+ * zero_free: The number of zero and free pages.
+ * page_file_writes: The writes to the page file in pages.
+ * io_diff: An indicator of file cache efficiency or page file activity,
+ * calculated as File Cache Page Fault Count - Page Read Count.
+ * This value is in pages.
+ *
+ * Some of these metrics are Windows specific and fortunately
+ * the algorithm on the host side that computes the guest memory
+ * pressure only uses num_committed value.
+ */
+
+struct dm_status {
+ struct dm_header hdr;
+ __u64 num_avail;
+ __u64 num_committed;
+ __u64 page_file_size;
+ __u64 zero_free;
+ __u32 page_file_writes;
+ __u32 io_diff;
+} __packed;
+
+
+/*
+ * Message to ask the guest to allocate memory - balloon up message.
+ * This message is sent from the host to the guest. The guest may not be
+ * able to allocate as much memory as requested.
+ *
+ * num_pages: number of pages to allocate.
+ */
+
+struct dm_balloon {
+ struct dm_header hdr;
+ __u32 num_pages;
+ __u32 reservedz;
+} __packed;
+
+
+/*
+ * Balloon response message; this message is sent from the guest
+ * to the host in response to the balloon message.
+ *
+ * reservedz: Reserved; must be set to zero.
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE, there will be at least one more message from the guest.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_balloon_response {
+ struct dm_header hdr;
+ __u32 reservedz;
+ __u32 more_pages:1;
+ __u32 range_count:31;
+ union dm_mem_page_range range_array[];
+} __packed;
+
+/*
+ * Un-balloon message; this message is sent from the host
+ * to the guest to give guest more memory.
+ *
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE, there will be at least one more message from the host.
+ *
+ * reservedz: Reserved; must be set to zero.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_unballoon_request {
+ struct dm_header hdr;
+ __u32 more_pages:1;
+ __u32 reservedz:31;
+ __u32 range_count;
+ union dm_mem_page_range range_array[];
+} __packed;
+
+/*
+ * Un-balloon response message; this message is sent from the guest
+ * to the host in response to an unballoon request.
+ *
+ */
+
+struct dm_unballoon_response {
+ struct dm_header hdr;
+} __packed;
+
+
+/*
+ * Hot add request message. Message sent from the host to the guest.
+ *
+ * mem_range: Memory range to hot add.
+ *
+ */
+
+struct dm_hot_add {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+} __packed;
+
+/*
+ * Hot add response message.
+ * This message is sent by the guest to report the status of a hot add request.
+ * If page_count is less than the requested page count, then the host should
+ * assume all further hot add requests will fail, since this indicates that
+ * the guest has hit an upper physical memory barrier.
+ *
+ * Hot adds may also fail due to low resources; in this case, the guest must
+ * not complete this message until the hot add can succeed, and the host must
+ * not send a new hot add request until the response is sent.
+ * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
+ * times, it fails the request.
+ *
+ *
+ * page_count: number of pages that were successfully hot added.
+ *
+ * result: result of the operation; 1: success, 0: failure.
+ *
+ */
+
+struct dm_hot_add_response {
+ struct dm_header hdr;
+ __u32 page_count;
+ __u32 result;
+} __packed;
+
+/*
+ * Types of information sent from host to the guest.
+ */
+
+enum dm_info_type {
+ INFO_TYPE_MAX_PAGE_CNT = 0,
+ MAX_INFO_TYPE
+};
+
+
+/*
+ * Header for the information message.
+ */
+
+struct dm_info_header {
+ enum dm_info_type type;
+ __u32 data_size;
+} __packed;
+
+/*
+ * This message is sent from the host to the guest to pass
+ * some relevant information (win8 addition).
+ *
+ * reserved: not used.
+ * info_size: size of the information blob.
+ * info: information blob.
+ */
+
+struct dm_info_msg {
+ struct dm_header hdr;
+ __u32 reserved;
+ __u32 info_size;
+ __u8 info[];
+};
+
+/*
+ * End protocol definitions.
+ */
+
+/*
+ * State to manage hot adding memory into the guest.
+ * The range start_pfn : end_pfn specifies the range
+ * that the host has asked us to hot add. The range
+ * start_pfn : ha_end_pfn specifies the range that we have
+ * currently hot added. We hot add in multiples of 128M
+ * chunks; it is possible that we may not be able to bring
+ * online all the pages in the region. The range
+ * covered_start_pfn:covered_end_pfn defines the pages that can
+ * be brought online.
+ */
+
+struct hv_hotadd_state {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long covered_start_pfn;
+ unsigned long covered_end_pfn;
+ unsigned long ha_end_pfn;
+ unsigned long end_pfn;
+ /*
+ * A list of gaps.
+ */
+ struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long end_pfn;
+};
+
+struct balloon_state {
+ __u32 num_pages;
+ struct work_struct wrk;
+};
+
+struct hot_add_wrk {
+ union dm_mem_page_range ha_page_range;
+ union dm_mem_page_range ha_region_range;
+ struct work_struct wrk;
+};
+
+static bool allow_hibernation;
+static bool hot_add = true;
+static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 45;
+
+/*
+ * The last time we posted a pressure report to host.
+ */
+static unsigned long last_post_time;
+
+module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(hot_add, "If set, attempt memory hot_add");
+
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
+static atomic_t trans_id = ATOMIC_INIT(0);
+
+static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
+
+/*
+ * Driver specific state.
+ */
+
+enum hv_dm_state {
+ DM_INITIALIZING = 0,
+ DM_INITIALIZED,
+ DM_BALLOON_UP,
+ DM_BALLOON_DOWN,
+ DM_HOT_ADD,
+ DM_INIT_ERROR
+};
+
+
+static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
+static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
+#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
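+/*
+ * For reference, assuming a 4 KiB PAGE_SIZE: PAGES_IN_2M == 512 and
+ * HA_CHUNK == 32768 PFNs, i.e. memory is hot added in 128 MiB chunks.
+ */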
+
+struct hv_dynmem_device {
+ struct hv_device *dev;
+ enum hv_dm_state state;
+ struct completion host_event;
+ struct completion config_event;
+
+ /*
+ * Number of pages we have currently ballooned out.
+ */
+ unsigned int num_pages_ballooned;
+ unsigned int num_pages_onlined;
+ unsigned int num_pages_added;
+
+ /*
+ * State to manage the ballooning (up) operation.
+ */
+ struct balloon_state balloon_wrk;
+
+ /*
+ * State to execute the "hot-add" operation.
+ */
+ struct hot_add_wrk ha_wrk;
+
+ /*
+ * This state tracks if the host has specified a hot-add
+ * region.
+ */
+ bool host_specified_ha_region;
+
+ /*
+ * State to synchronize hot-add.
+ */
+ struct completion ol_waitevent;
+ /*
+ * This thread handles hot-add
+ * requests from the host as well as notifying
+ * the host with regards to memory pressure in
+ * the guest.
+ */
+ struct task_struct *thread;
+
+ /*
+ * Protects ha_region_list, num_pages_onlined counter and individual
+ * regions from ha_region_list.
+ */
+ spinlock_t ha_lock;
+
+ /*
+ * A list of hot-add regions.
+ */
+ struct list_head ha_region_list;
+
+ /*
+ * We start with the highest version we can support
+ * and downgrade based on the host; we save here the
+ * next version to try.
+ */
+ __u32 next_version;
+
+ /*
+ * The negotiated version agreed by host.
+ */
+ __u32 version;
+
+ struct page_reporting_dev_info pr_dev_info;
+
+ /*
+ * Maximum number of pages that can be hot_add-ed
+ */
+ __u64 max_dynamic_page_count;
+};
+
+static struct hv_dynmem_device dm_device;
+
+static void post_status(struct hv_dynmem_device *dm);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
+ unsigned long pfn)
+{
+ struct hv_hotadd_gap *gap;
+
+ /* The page is not backed. */
+ if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
+ return false;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
+ return false;
+ }
+
+ return true;
+}
+
+static unsigned long hv_page_offline_check(unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long pfn = start_pfn, count = 0;
+ struct hv_hotadd_state *has;
+ bool found;
+
+ while (pfn < start_pfn + nr_pages) {
+ /*
+ * Search for the HAS which covers the pfn and, when we find one,
+ * count how many consecutive PFNs are covered.
+ */
+ found = false;
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ while ((pfn >= has->start_pfn) &&
+ (pfn < has->end_pfn) &&
+ (pfn < start_pfn + nr_pages)) {
+ found = true;
+ if (has_pfn_is_backed(has, pfn))
+ count++;
+ pfn++;
+ }
+ }
+
+ /*
+ * This PFN is not in any HAS (e.g. we're offlining a region
+ * which was present at boot), no need to account for it. Go
+ * to the next one.
+ */
+ if (!found)
+ pfn++;
+ }
+
+ return count;
+}
+
+static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct memory_notify *mem = (struct memory_notify *)v;
+ unsigned long flags, pfn_count;
+
+ switch (val) {
+ case MEM_ONLINE:
+ case MEM_CANCEL_ONLINE:
+ complete(&dm_device.ol_waitevent);
+ break;
+
+ case MEM_OFFLINE:
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ pfn_count = hv_page_offline_check(mem->start_pfn,
+ mem->nr_pages);
+ if (pfn_count <= dm_device.num_pages_onlined) {
+ dm_device.num_pages_onlined -= pfn_count;
+ } else {
+ /*
+ * We're offlining more pages than we managed to online.
+ * This is unexpected. In any case don't let
+ * num_pages_onlined wrap around zero.
+ */
+ WARN_ON_ONCE(1);
+ dm_device.num_pages_onlined = 0;
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ break;
+ case MEM_GOING_ONLINE:
+ case MEM_GOING_OFFLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hv_memory_nb = {
+ .notifier_call = hv_memory_notifier,
+ .priority = 0
+};
+
+/* Check if the particular page is backed and can be onlined and online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+ if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
+ if (!PageOffline(pg))
+ __SetPageOffline(pg);
+ return;
+ }
+ if (PageOffline(pg))
+ __ClearPageOffline(pg);
+
+ /* This frame is currently backed; online the page. */
+ generic_online_page(pg, 0);
+
+ lockdep_assert_held(&dm_device.ha_lock);
+ dm_device.num_pages_onlined++;
+}
+
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+ unsigned long start_pfn, unsigned long size)
+{
+ int i;
+
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
+ for (i = 0; i < size; i++)
+ hv_page_online_one(has, pfn_to_page(start_pfn + i));
+}
+
+static void hv_mem_hot_add(unsigned long start, unsigned long size,
+ unsigned long pfn_count,
+ struct hv_hotadd_state *has)
+{
+ int ret = 0;
+ int i, nid;
+ unsigned long start_pfn;
+ unsigned long processed_pfn;
+ unsigned long total_pfn = pfn_count;
+ unsigned long flags;
+
+ for (i = 0; i < (size/HA_CHUNK); i++) {
+ start_pfn = start + (i * HA_CHUNK);
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ has->ha_end_pfn += HA_CHUNK;
+
+ if (total_pfn > HA_CHUNK) {
+ processed_pfn = HA_CHUNK;
+ total_pfn -= HA_CHUNK;
+ } else {
+ processed_pfn = total_pfn;
+ total_pfn = 0;
+ }
+
+ has->covered_end_pfn += processed_pfn;
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+
+ reinit_completion(&dm_device.ol_waitevent);
+
+ nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
+ ret = add_memory(nid, PFN_PHYS((start_pfn)),
+ (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
+
+ if (ret) {
+ pr_err("hot_add memory failed error is %d\n", ret);
+ if (ret == -EEXIST) {
+ /*
+ * This error indicates that the error
+ * is not a transient failure. This is the
+ * case where the guest's physical address map
+ * precludes hot adding memory. Stop all further
+ * memory hot-add.
+ */
+ do_hot_add = false;
+ }
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ has->ha_end_pfn -= HA_CHUNK;
+ has->covered_end_pfn -= processed_pfn;
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ break;
+ }
+
+ /*
+ * Wait for memory to get onlined. If the kernel onlined the
+ * memory when adding it, this will return directly. Otherwise,
+ * it will wait for user space to online the memory. This helps
+ * to avoid adding memory faster than it is getting onlined. As
+ * adding succeeded, it is ok to proceed even if the memory was
+ * not onlined in time.
+ */
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
+ post_status(&dm_device);
+ }
+}
+
+static void hv_online_page(struct page *pg, unsigned int order)
+{
+ struct hv_hotadd_state *has;
+ unsigned long flags;
+ unsigned long pfn = page_to_pfn(pg);
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /* The page belongs to a different HAS. */
+ if ((pfn < has->start_pfn) ||
+ (pfn + (1UL << order) > has->end_pfn))
+ continue;
+
+ hv_bring_pgs_online(has, pfn, 1UL << order);
+ break;
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+}
+
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+{
+ struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap;
+ unsigned long residual, new_inc;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /*
+ * If the pfn range we are dealing with is not in the current
+ * "hot add block", move on.
+ */
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ continue;
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, create a gap and update covered_end_pfn.
+ */
+ if (has->covered_end_pfn != start_pfn) {
+ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+ if (!gap) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ INIT_LIST_HEAD(&gap->list);
+ gap->start_pfn = has->covered_end_pfn;
+ gap->end_pfn = start_pfn;
+ list_add_tail(&gap->list, &has->gap_list);
+
+ has->covered_end_pfn = start_pfn;
+ }
+
+ /*
+ * If the current hot add-request extends beyond
+ * our current limit; extend it.
+ */
+ if ((start_pfn + pfn_cnt) > has->end_pfn) {
+ residual = (start_pfn + pfn_cnt - has->end_pfn);
+ /*
+ * Extend the region by multiples of HA_CHUNK.
+ */
+ new_inc = (residual / HA_CHUNK) * HA_CHUNK;
+ if (residual % HA_CHUNK)
+ new_inc += HA_CHUNK;
+
+ has->end_pfn += new_inc;
+ }
+
+ ret = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+
+ return ret;
+}
+
+static unsigned long handle_pg_range(unsigned long pg_start,
+ unsigned long pg_count)
+{
+ unsigned long start_pfn = pg_start;
+ unsigned long pfn_cnt = pg_count;
+ unsigned long size;
+ struct hv_hotadd_state *has;
+ unsigned long pgs_ol = 0;
+ unsigned long old_covered_state;
+ unsigned long res = 0, flags;
+
+ pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+ pg_start);
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /*
+ * If the pfn range we are dealing with is not in the current
+ * "hot add block", move on.
+ */
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ continue;
+
+ old_covered_state = has->covered_end_pfn;
+
+ if (start_pfn < has->ha_end_pfn) {
+ /*
+ * This is the case where we are backing pages
+ * in an already hot added region. Bring
+ * these pages online first.
+ */
+ pgs_ol = has->ha_end_pfn - start_pfn;
+ if (pgs_ol > pfn_cnt)
+ pgs_ol = pfn_cnt;
+
+ has->covered_end_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
+ /*
+ * Check if the corresponding memory block is already
+ * online. It is possible to observe struct pages still
+ * being uninitialized here so check section instead.
+ * In case the section is online we need to bring the
+ * rest of pfns (which were not backed previously)
+ * online too.
+ */
+ if (start_pfn > has->start_pfn &&
+ online_section_nr(pfn_to_section_nr(start_pfn)))
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
+
+ }
+
+ if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+ /*
+ * We have some residual hot add range
+ * that needs to be hot added; hot add
+ * it now. Hot add a multiple of
+ * HA_CHUNK that fully covers the pages
+ * we have.
+ */
+ size = (has->end_pfn - has->ha_end_pfn);
+ if (pfn_cnt <= size) {
+ size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
+ if (pfn_cnt % HA_CHUNK)
+ size += HA_CHUNK;
+ } else {
+ pfn_cnt = size;
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ }
+ /*
+ * If we managed to online any pages that were given to us,
+ * we declare success.
+ */
+ res = has->covered_end_pfn - old_covered_state;
+ break;
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+
+ return res;
+}
+
+static unsigned long process_hot_add(unsigned long pg_start,
+ unsigned long pfn_cnt,
+ unsigned long rg_start,
+ unsigned long rg_size)
+{
+ struct hv_hotadd_state *ha_region = NULL;
+ int covered;
+ unsigned long flags;
+
+ if (pfn_cnt == 0)
+ return 0;
+
+ if (!dm_device.host_specified_ha_region) {
+ covered = pfn_covered(pg_start, pfn_cnt);
+ if (covered < 0)
+ return 0;
+
+ if (covered)
+ goto do_pg_range;
+ }
+
+ /*
+ * If the host has specified a hot-add range; deal with it first.
+ */
+
+ if (rg_size != 0) {
+ ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
+ if (!ha_region)
+ return 0;
+
+ INIT_LIST_HEAD(&ha_region->list);
+ INIT_LIST_HEAD(&ha_region->gap_list);
+
+ ha_region->start_pfn = rg_start;
+ ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
+ ha_region->covered_end_pfn = pg_start;
+ ha_region->end_pfn = rg_start + rg_size;
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ }
+
+do_pg_range:
+ /*
+ * Process the page range specified; bringing them
+ * online if possible.
+ */
+ return handle_pg_range(pg_start, pfn_cnt);
+}
+
+#endif
+
+static void hot_add_req(struct work_struct *dummy)
+{
+ struct dm_hot_add_response resp;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unsigned long pg_start, pfn_cnt;
+ unsigned long rg_start, rg_sz;
+#endif
+ struct hv_dynmem_device *dm = &dm_device;
+
+ memset(&resp, 0, sizeof(struct dm_hot_add_response));
+ resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
+ resp.hdr.size = sizeof(struct dm_hot_add_response);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
+ pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
+
+ rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
+ rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
+
+ if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
+ unsigned long region_size;
+ unsigned long region_start;
+
+ /*
+ * The host has not specified the hot-add region.
+ * Based on the hot-add page range being specified,
+ * compute a hot-add region that can cover the pages
+ * that need to be hot-added while ensuring the alignment
+ * and size requirements that Linux imposes on hot-add.
+ */
+ region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
+ if (pfn_cnt % HA_CHUNK)
+ region_size += HA_CHUNK;
+
+ region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
+
+ rg_start = region_start;
+ rg_sz = region_size;
+ }
+
+ if (do_hot_add)
+ resp.page_count = process_hot_add(pg_start, pfn_cnt,
+ rg_start, rg_sz);
+
+ dm->num_pages_added += resp.page_count;
+#endif
+ /*
+ * The result field of the response structure has the
+ * following semantics:
+ *
+ * 1. If all or some pages hot-added: Guest should return success.
+ *
+ * 2. If no pages could be hot-added:
+ *
+ * If the guest returns success, then the host
+ * will not attempt any further hot-add operations. This
+ * signifies a permanent failure.
+ *
+ * If the guest returns failure, then this failure will be
+ * treated as a transient failure and the host may retry the
+ * hot-add operation after some delay.
+ */
+ if (resp.page_count > 0)
+ resp.result = 1;
+ else if (!do_hot_add)
+ resp.result = 1;
+ else
+ resp.result = 0;
+
+ if (!do_hot_add || resp.page_count == 0) {
+ if (!allow_hibernation)
+ pr_err("Memory hot add failed\n");
+ else
+ pr_info("Ignore hot-add request!\n");
+ }
+
+ dm->state = DM_INITIALIZED;
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
+ vmbus_sendpacket(dm->dev->channel, &resp,
+ sizeof(struct dm_hot_add_response),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
+{
+ struct dm_info_header *info_hdr;
+
+ info_hdr = (struct dm_info_header *)msg->info;
+
+ switch (info_hdr->type) {
+ case INFO_TYPE_MAX_PAGE_CNT:
+ if (info_hdr->data_size == sizeof(__u64)) {
+ __u64 *max_page_count = (__u64 *)&info_hdr[1];
+
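+ /*
+ * HV_HYP_PAGE_SHIFT is 12, so the shift by (20 - 12) == 8 below
+ * converts a count of 4 KiB Hyper-V pages to MB (256 such pages
+ * per MB).
+ */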
+ pr_info("Max. dynamic memory size: %llu MB\n",
+ (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
+ dm->max_dynamic_page_count = *max_page_count;
+ }
+
+ break;
+ default:
+ pr_warn("Received Unknown type: %d\n", info_hdr->type);
+ }
+}
+
+static unsigned long compute_balloon_floor(void)
+{
+ unsigned long min_pages;
+ unsigned long nr_pages = totalram_pages();
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+ /* Simple continuous piecewise linear function:
+ * max MiB -> min MiB gradient
+ * 0 0
+ * 16 16
+ * 32 24
+ * 128 72 (1/2)
+ * 512 168 (1/4)
+ * 2048 360 (1/8)
+ * 8192 744 (1/16)
+ * 32768 1512 (1/32)
+ */
+ if (nr_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (nr_pages >> 1);
+ else if (nr_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (nr_pages >> 2);
+ else if (nr_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (nr_pages >> 3);
+ else if (nr_pages < MB2PAGES(8192))
+ min_pages = MB2PAGES(232) + (nr_pages >> 4);
+ else
+ min_pages = MB2PAGES(488) + (nr_pages >> 5);
+#undef MB2PAGES
+ return min_pages;
+}
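+
+/*
+ * Worked example for the piecewise function above, assuming 4 KiB pages:
+ * a guest with 2048 MiB of RAM falls into the nr_pages < MB2PAGES(8192)
+ * branch, giving min_pages == MB2PAGES(232) + MB2PAGES(2048) / 16 ==
+ * MB2PAGES(360), i.e. the 360 MiB floor listed in the table.
+ */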
+
+/*
+ * Compute total committed memory pages
+ */
+
+static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
+{
+ return vm_memory_committed() +
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
+}
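+
+/*
+ * Illustration with made-up numbers: if vm_memory_committed() reports
+ * 1000000 pages, 50000 pages are ballooned out, 200000 pages were added
+ * of which 150000 are onlined, and the balloon floor is 100000 pages,
+ * the value returned is 1000000 + 50000 + 50000 + 100000 == 1200000.
+ */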
+
+/*
+ * Post our memory pressure status to the host. The host expects
+ * the guest to post this status periodically at 1-second
+ * intervals.
+ *
+ * The metrics specified in this protocol are very Windows
+ * specific and so we cook up numbers here to convey our memory
+ * pressure.
+ */
+
+static void post_status(struct hv_dynmem_device *dm)
+{
+ struct dm_status status;
+ unsigned long now = jiffies;
+ unsigned long last_post = last_post_time;
+ unsigned long num_pages_avail, num_pages_committed;
+
+ if (pressure_report_delay > 0) {
+ --pressure_report_delay;
+ return;
+ }
+
+ if (!time_after(now, (last_post_time + HZ)))
+ return;
+
+ memset(&status, 0, sizeof(struct dm_status));
+ status.hdr.type = DM_STATUS_REPORT;
+ status.hdr.size = sizeof(struct dm_status);
+ status.hdr.trans_id = atomic_inc_return(&trans_id);
+
+ /*
+ * The host expects the guest to report free and committed memory.
+ * Furthermore, the host expects the pressure information to include
+ * the ballooned out pages. For a given amount of memory that we are
+ * managing we need to compute a floor below which we should not
+ * balloon. Compute this and add it to the pressure report.
+ * We also need to report all offline pages (num_pages_added -
+ * num_pages_onlined) as committed to the host, otherwise it can try
+ * asking us to balloon them out.
+ */
+ num_pages_avail = si_mem_available();
+ num_pages_committed = get_pages_committed(dm);
+
+ trace_balloon_status(num_pages_avail, num_pages_committed,
+ vm_memory_committed(), dm->num_pages_ballooned,
+ dm->num_pages_added, dm->num_pages_onlined);
+
+ /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
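+ /*
+ * For example, with a 4 KiB guest PAGE_SIZE, NR_HV_HYP_PAGES_IN_PAGE
+ * is 1 and these counts are unchanged; a 64 KiB guest page would be
+ * counted as 16 Hyper-V pages here.
+ */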
+ status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+ status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
+ /*
+ * If our transaction ID is no longer current, just don't
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+ if (status.hdr.trans_id != atomic_read(&trans_id))
+ return;
+
+ /*
+ * If the last post time that we sampled has changed,
+ * we have raced, don't post the status.
+ */
+ if (last_post != last_post_time)
+ return;
+
+ last_post_time = jiffies;
+ vmbus_sendpacket(dm->dev->channel, &status,
+ sizeof(struct dm_status),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+}
+
+static void free_balloon_pages(struct hv_dynmem_device *dm,
+ union dm_mem_page_range *range_array)
+{
+ int num_pages = range_array->finfo.page_cnt;
+ __u64 start_frame = range_array->finfo.start_page;
+ struct page *pg;
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ pg = pfn_to_page(i + start_frame);
+ __ClearPageOffline(pg);
+ __free_page(pg);
+ dm->num_pages_ballooned--;
+ adjust_managed_page_count(pg, 1);
+ }
+}
+
+
+
+static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
+ unsigned int num_pages,
+ struct dm_balloon_response *bl_resp,
+ int alloc_unit)
+{
+ unsigned int i, j;
+ struct page *pg;
+
+ for (i = 0; i < num_pages / alloc_unit; i++) {
+ if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
+ HV_HYP_PAGE_SIZE)
+ return i * alloc_unit;
+
+ /*
+ * We execute this code in a thread context. Furthermore,
+ * we don't want the kernel to try too hard.
+ */
+ pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN,
+ get_order(alloc_unit << PAGE_SHIFT));
+
+ if (!pg)
+ return i * alloc_unit;
+
+ dm->num_pages_ballooned += alloc_unit;
+
+ /*
+ * If we allocated 2M pages, split them so we
+ * can free them in any order we get.
+ */
+
+ if (alloc_unit != 1)
+ split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
+
+ /* mark all pages offline */
+ for (j = 0; j < alloc_unit; j++) {
+ __SetPageOffline(pg + j);
+ adjust_managed_page_count(pg + j, -1);
+ }
+
+ bl_resp->range_count++;
+ bl_resp->range_array[i].finfo.start_page =
+ page_to_pfn(pg);
+ bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
+ bl_resp->hdr.size += sizeof(union dm_mem_page_range);
+
+ }
+
+ return i * alloc_unit;
+}
+
+static void balloon_up(struct work_struct *dummy)
+{
+ unsigned int num_pages = dm_device.balloon_wrk.num_pages;
+ unsigned int num_ballooned = 0;
+ struct dm_balloon_response *bl_resp;
+ int alloc_unit;
+ int ret;
+ bool done = false;
+ int i;
+ long avail_pages;
+ unsigned long floor;
+
+ /*
+ * We will attempt 2M allocations. However, if we fail to
+ * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
+ */
+ alloc_unit = PAGES_IN_2M;
+
+ avail_pages = si_mem_available();
+ floor = compute_balloon_floor();
+
+ /* Refuse to balloon below the floor. */
+ if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ pr_info("Balloon request will be partially fulfilled. %s\n",
+ avail_pages < num_pages ? "Not enough memory." :
+ "Balloon floor reached.");
+
+ num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
+ }
+
+ while (!done) {
+ memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
+ bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
+ bl_resp->hdr.type = DM_BALLOON_RESPONSE;
+ bl_resp->hdr.size = sizeof(struct dm_balloon_response);
+ bl_resp->more_pages = 1;
+
+ num_pages -= num_ballooned;
+ num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
+ bl_resp, alloc_unit);
+
+ if (alloc_unit != 1 && num_ballooned == 0) {
+ alloc_unit = 1;
+ continue;
+ }
+
+ if (num_ballooned == 0 || num_ballooned == num_pages) {
+ pr_debug("Ballooned %u out of %u requested pages.\n",
+ num_pages, dm_device.balloon_wrk.num_pages);
+
+ bl_resp->more_pages = 0;
+ done = true;
+ dm_device.state = DM_INITIALIZED;
+ }
+
+ /*
+ * We are pushing a lot of data through the channel;
+ * deal with transient failures caused by the lack
+ * of space in the ring buffer.
+ */
+
+ do {
+ bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
+ ret = vmbus_sendpacket(dm_device.dev->channel,
+ bl_resp,
+ bl_resp->hdr.size,
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret == -EAGAIN)
+ msleep(20);
+ post_status(&dm_device);
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ /*
+ * Free up the memory we allocated.
+ */
+ pr_err("Balloon response failed\n");
+
+ for (i = 0; i < bl_resp->range_count; i++)
+ free_balloon_pages(&dm_device,
+ &bl_resp->range_array[i]);
+
+ done = true;
+ }
+ }
+
+}
+
+static void balloon_down(struct hv_dynmem_device *dm,
+ struct dm_unballoon_request *req)
+{
+ union dm_mem_page_range *range_array = req->range_array;
+ int range_count = req->range_count;
+ struct dm_unballoon_response resp;
+ int i;
+ unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
+
+ for (i = 0; i < range_count; i++) {
+ free_balloon_pages(dm, &range_array[i]);
+ complete(&dm_device.config_event);
+ }
+
+ pr_debug("Freed %u ballooned pages.\n",
+ prev_pages_ballooned - dm->num_pages_ballooned);
+
+ if (req->more_pages == 1)
+ return;
+
+ memset(&resp, 0, sizeof(struct dm_unballoon_response));
+ resp.hdr.type = DM_UNBALLOON_RESPONSE;
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
+ resp.hdr.size = sizeof(struct dm_unballoon_response);
+
+ vmbus_sendpacket(dm_device.dev->channel, &resp,
+ sizeof(struct dm_unballoon_response),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ dm->state = DM_INITIALIZED;
+}
+
+static void balloon_onchannelcallback(void *context);
+
+static int dm_thread_func(void *dm_dev)
+{
+ struct hv_dynmem_device *dm = dm_dev;
+
+ while (!kthread_should_stop()) {
+ wait_for_completion_interruptible_timeout(
+ &dm_device.config_event, 1*HZ);
+ /*
+ * The host expects us to post information on the memory
+ * pressure every second.
+ */
+ reinit_completion(&dm_device.config_event);
+ post_status(dm);
+ }
+
+ return 0;
+}
+
+
+static void version_resp(struct hv_dynmem_device *dm,
+ struct dm_version_response *vresp)
+{
+ struct dm_version_request version_req;
+ int ret;
+
+ if (vresp->is_accepted) {
+ /*
+ * We are done; wake up the
+ * context waiting for version
+ * negotiation.
+ */
+ complete(&dm->host_event);
+ return;
+ }
+ /*
+ * If there are more versions to try, continue
+ * with the negotiation; if not, shut down the
+ * service since we are not able to negotiate a
+ * suitable version number with the host.
+ */
+ if (dm->next_version == 0)
+ goto version_error;
+
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+ version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+ version_req.version.version = dm->next_version;
+ dm->version = version_req.version.version;
+
+ /*
+ * Set the next version to try in case current version fails.
+ * Win7 protocol ought to be the last one to try.
+ */
+ switch (version_req.version.version) {
+ case DYNMEM_PROTOCOL_VERSION_WIN8:
+ dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ version_req.is_last_attempt = 0;
+ break;
+ default:
+ dm->next_version = 0;
+ version_req.is_last_attempt = 1;
+ }
+
+ ret = vmbus_sendpacket(dm->dev->channel, &version_req,
+ sizeof(struct dm_version_request),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret)
+ goto version_error;
+
+ return;
+
+version_error:
+ dm->state = DM_INIT_ERROR;
+ complete(&dm->host_event);
+}
+
+static void cap_resp(struct hv_dynmem_device *dm,
+ struct dm_capabilities_resp_msg *cap_resp)
+{
+ if (!cap_resp->is_accepted) {
+ pr_err("Capabilities not accepted by host\n");
+ dm->state = DM_INIT_ERROR;
+ }
+ complete(&dm->host_event);
+}
+
+static void balloon_onchannelcallback(void *context)
+{
+ struct hv_device *dev = context;
+ u32 recvlen;
+ u64 requestid;
+ struct dm_message *dm_msg;
+ struct dm_header *dm_hdr;
+ struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+ struct dm_balloon *bal_msg;
+ struct dm_hot_add *ha_msg;
+ union dm_mem_page_range *ha_pg_range;
+ union dm_mem_page_range *ha_region;
+
+ memset(recv_buffer, 0, sizeof(recv_buffer));
+ vmbus_recvpacket(dev->channel, recv_buffer,
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ dm_msg = (struct dm_message *)recv_buffer;
+ dm_hdr = &dm_msg->hdr;
+
+ switch (dm_hdr->type) {
+ case DM_VERSION_RESPONSE:
+ version_resp(dm,
+ (struct dm_version_response *)dm_msg);
+ break;
+
+ case DM_CAPABILITIES_RESPONSE:
+ cap_resp(dm,
+ (struct dm_capabilities_resp_msg *)dm_msg);
+ break;
+
+ case DM_BALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-up request!\n");
+ break;
+ }
+
+ if (dm->state == DM_BALLOON_UP)
+ pr_warn("Currently ballooning\n");
+ bal_msg = (struct dm_balloon *)recv_buffer;
+ dm->state = DM_BALLOON_UP;
+ dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
+ schedule_work(&dm_device.balloon_wrk.wrk);
+ break;
+
+ case DM_UNBALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-down request!\n");
+ break;
+ }
+
+ dm->state = DM_BALLOON_DOWN;
+ balloon_down(dm,
+ (struct dm_unballoon_request *)recv_buffer);
+ break;
+
+ case DM_MEM_HOT_ADD_REQUEST:
+ if (dm->state == DM_HOT_ADD)
+ pr_warn("Currently hot-adding\n");
+ dm->state = DM_HOT_ADD;
+ ha_msg = (struct dm_hot_add *)recv_buffer;
+ if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
+ /*
+ * This is a normal hot-add request specifying
+ * hot-add memory.
+ */
+ dm->host_specified_ha_region = false;
+ ha_pg_range = &ha_msg->range;
+ dm->ha_wrk.ha_page_range = *ha_pg_range;
+ dm->ha_wrk.ha_region_range.page_range = 0;
+ } else {
+ /*
+ * Host is specifying that we first hot-add
+ * a region and then partially populate this
+ * region.
+ */
+ dm->host_specified_ha_region = true;
+ ha_pg_range = &ha_msg->range;
+ ha_region = &ha_pg_range[1];
+ dm->ha_wrk.ha_page_range = *ha_pg_range;
+ dm->ha_wrk.ha_region_range = *ha_region;
+ }
+ schedule_work(&dm_device.ha_wrk.wrk);
+ break;
+
+ case DM_INFO_MESSAGE:
+ process_info(dm, (struct dm_info_msg *)dm_msg);
+ break;
+
+ default:
+ pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
+
+ }
+ }
+
+}
+
+/* Hyper-V only supports reporting 2MB pages or higher */
+#define HV_MIN_PAGE_REPORTING_ORDER 9
+#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
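+/*
+ * With HV_HYP_PAGE_SIZE being 4 KiB, HV_MIN_PAGE_REPORTING_LEN is
+ * 4096 << 9 == 2 MiB. For example, a reported 4 MiB scatterlist entry
+ * below gets range->page.additional_pages == (4 MiB / 2 MiB) - 1 == 1.
+ */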
+static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sgl, unsigned int nents)
+{
+ unsigned long flags;
+ struct hv_memory_hint *hint;
+ int i;
+ u64 status;
+ struct scatterlist *sg;
+
+ WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
+ local_irq_save(flags);
+ hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ if (!hint) {
+ local_irq_restore(flags);
+ return -ENOSPC;
+ }
+
+ hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->reserved = 0;
+ for_each_sg(sgl, sg, nents, i) {
+ union hv_gpa_page_range *range;
+
+ range = &hint->ranges[i];
+ range->address_space = 0;
+ /* page reporting only reports 2MB pages or higher */
+ range->page.largepage = 1;
+ range->page.additional_pages =
+ (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
+ range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
+ range->base_large_pfn =
+ page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
+ }
+
+ status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
+ hint, NULL);
+ local_irq_restore(flags);
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ pr_err("Cold memory discard hypercall failed with status %llx\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enable_page_reporting(void)
+{
+ int ret;
+
+ /* Essentially, validate that the page reporting granularity (pageblock_order) is big enough. */
+ if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
+ pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
+ return;
+ }
+
+ if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
+ pr_debug("Cold memory discard hint not supported by Hyper-V\n");
+ return;
+ }
+
+ BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ dm_device.pr_dev_info.report = hv_free_page_report;
+ ret = page_reporting_register(&dm_device.pr_dev_info);
+ if (ret < 0) {
+ dm_device.pr_dev_info.report = NULL;
+ pr_err("Failed to enable cold memory discard: %d\n", ret);
+ } else {
+ pr_info("Cold memory discard hint enabled\n");
+ }
+}
+
+static void disable_page_reporting(void)
+{
+ if (dm_device.pr_dev_info.report) {
+ page_reporting_unregister(&dm_device.pr_dev_info);
+ dm_device.pr_dev_info.report = NULL;
+ }
+}
+
+static int ballooning_enabled(void)
+{
+ /*
+ * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+ * since currently it's unclear to us whether an unballoon request can
+ * make sure all page ranges are guest page size aligned.
+ */
+ if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+ pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int hot_add_enabled(void)
+{
+ /*
+ * Disable hot add on ARM64, because we currently rely on
+ * memory_add_physaddr_to_nid() to get the node id of a hot add range,
+ * but ARM64's memory_add_physaddr_to_nid() always returns 0 and
+ * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+ * add_memory().
+ */
+ if (IS_ENABLED(CONFIG_ARM64)) {
+ pr_info("Memory hot add disabled on ARM64\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int balloon_connect_vsp(struct hv_device *dev)
+{
+ struct dm_version_request version_req;
+ struct dm_capabilities cap_msg;
+ unsigned long t;
+ int ret;
+
+ /*
+ * max_pkt_size should be large enough for one vmbus packet header plus
+ * our receive buffer size. Hyper-V sends messages up to
+ * HV_HYP_PAGE_SIZE bytes long on the balloon channel.
+ */
+ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+
+ ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
+ balloon_onchannelcallback, dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Initiate the handshake with the host and negotiate
+ * a version that the host can support. We start with the
+ * highest version number and go down if the host cannot
+ * support it.
+ */
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+ version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
+ version_req.is_last_attempt = 0;
+ dm_device.version = version_req.version.version;
+
+ ret = vmbus_sendpacket(dev->channel, &version_req,
+ sizeof(struct dm_version_request),
+ (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
+ if (ret)
+ goto out;
+
+ t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /*
+ * If we could not negotiate a compatible version with the host
+ * fail the probe function.
+ */
+ if (dm_device.state == DM_INIT_ERROR) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ pr_info("Using Dynamic Memory protocol version %u.%u\n",
+ DYNMEM_MAJOR_VERSION(dm_device.version),
+ DYNMEM_MINOR_VERSION(dm_device.version));
+
+ /*
+ * Now submit our capabilities to the host.
+ */
+ memset(&cap_msg, 0, sizeof(struct dm_capabilities));
+ cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
+ cap_msg.hdr.size = sizeof(struct dm_capabilities);
+ cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+
+ /*
+ * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
+ * currently still requires the bits to be set, so we have to add code
+ * to fail the host's hot-add and balloon up/down requests, if any.
+ */
+ cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+ cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
+
+ /*
+ * Specify our alignment requirements for memory hot-add. A value
+ * of n means an alignment of 2^n MB; we specify 7, i.e. 128MB.
+ */
+ cap_msg.caps.cap_bits.hot_add_alignment = 7;
+
+ /*
+ * Currently the host does not use these
+ * values and we set them to what is done in the
+ * Windows driver.
+ */
+ cap_msg.min_page_cnt = 0;
+ cap_msg.max_page_number = -1;
+
+ ret = vmbus_sendpacket(dev->channel, &cap_msg,
+ sizeof(struct dm_capabilities),
+ (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
+ if (ret)
+ goto out;
+
+ t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /*
+ * If the host does not like our capabilities,
+ * fail the probe function.
+ */
+ if (dm_device.state == DM_INIT_ERROR) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ return 0;
+out:
+ vmbus_close(dev->channel);
+ return ret;
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * hv_balloon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be accessed via the hv-balloon file in debugfs.
+ *
+ * Return: zero on success or an error code.
+ */
+static int hv_balloon_debug_show(struct seq_file *f, void *offset)
+{
+ struct hv_dynmem_device *dm = f->private;
+ char *sname;
+
+ seq_printf(f, "%-22s: %u.%u\n", "host_version",
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
+
+ seq_printf(f, "%-22s:", "capabilities");
+ if (ballooning_enabled())
+ seq_puts(f, " enabled");
+
+ if (hot_add_enabled())
+ seq_puts(f, " hot_add");
+
+ seq_puts(f, "\n");
+
+ seq_printf(f, "%-22s: %u", "state", dm->state);
+ switch (dm->state) {
+ case DM_INITIALIZING:
+ sname = "Initializing";
+ break;
+ case DM_INITIALIZED:
+ sname = "Initialized";
+ break;
+ case DM_BALLOON_UP:
+ sname = "Balloon Up";
+ break;
+ case DM_BALLOON_DOWN:
+ sname = "Balloon Down";
+ break;
+ case DM_HOT_ADD:
+ sname = "Hot Add";
+ break;
+ case DM_INIT_ERROR:
+ sname = "Error";
+ break;
+ default:
+ sname = "Unknown";
+ }
+ seq_printf(f, " (%s)\n", sname);
+
+ /* HV Page Size */
+ seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
+
+ /* Pages added with hot_add */
+ seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
+
+ /* pages that are "onlined"/used from pages_added */
+ seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
+
+ /* pages we have given back to host */
+ seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
+
+ seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
+ get_pages_committed(dm));
+
+ seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
+ dm->max_dynamic_page_count);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
+
+static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+ debugfs_create_file("hv-balloon", 0444, NULL, b,
+ &hv_balloon_debug_fops);
+}
+
+static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+ debugfs_lookup_and_remove("hv-balloon", NULL);
+}
+
+#else
+
+static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+}
+
+static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int balloon_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ int ret;
+
+ allow_hibernation = hv_is_hibernation_supported();
+ if (allow_hibernation)
+ hot_add = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
+ dm_device.dev = dev;
+ dm_device.state = DM_INITIALIZING;
+ dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
+ init_completion(&dm_device.host_event);
+ init_completion(&dm_device.config_event);
+ INIT_LIST_HEAD(&dm_device.ha_region_list);
+ spin_lock_init(&dm_device.ha_lock);
+ INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
+ INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
+ dm_device.host_specified_ha_region = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ set_online_page_callback(&hv_online_page);
+ init_completion(&dm_device.ol_waitevent);
+ register_memory_notifier(&hv_memory_nb);
+#endif
+
+ hv_set_drvdata(dev, &dm_device);
+
+ ret = balloon_connect_vsp(dev);
+ if (ret != 0)
+ goto connect_error;
+
+ enable_page_reporting();
+ dm_device.state = DM_INITIALIZED;
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ goto probe_error;
+ }
+
+ hv_balloon_debugfs_init(&dm_device);
+
+ return 0;
+
+probe_error:
+ dm_device.state = DM_INIT_ERROR;
+ dm_device.thread = NULL;
+ disable_page_reporting();
+ vmbus_close(dev->channel);
+connect_error:
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
+#endif
+ return ret;
+}
+
+static int balloon_remove(struct hv_device *dev)
+{
+ struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+ struct hv_hotadd_state *has, *tmp;
+ struct hv_hotadd_gap *gap, *tmp_gap;
+ unsigned long flags;
+
+ if (dm->num_pages_ballooned != 0)
+ pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+
+ hv_balloon_debugfs_exit(dm);
+
+ cancel_work_sync(&dm->balloon_wrk.wrk);
+ cancel_work_sync(&dm->ha_wrk.wrk);
+
+ kthread_stop(dm->thread);
+
+ /*
+ * This is to handle the case when balloon_resume()
+ * call has failed and some cleanup has been done as
+ * a part of the error handling.
+ */
+ if (dm_device.state != DM_INIT_ERROR) {
+ disable_page_reporting();
+ vmbus_close(dev->channel);
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
+#endif
+ }
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
+ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+ list_del(&gap->list);
+ kfree(gap);
+ }
+ list_del(&has->list);
+ kfree(has);
+ }
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+
+ return 0;
+}
+
+static int balloon_suspend(struct hv_device *hv_dev)
+{
+ struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
+
+ tasklet_disable(&hv_dev->channel->callback_event);
+
+ cancel_work_sync(&dm->balloon_wrk.wrk);
+ cancel_work_sync(&dm->ha_wrk.wrk);
+
+ if (dm->thread) {
+ kthread_stop(dm->thread);
+ dm->thread = NULL;
+ vmbus_close(hv_dev->channel);
+ }
+
+ tasklet_enable(&hv_dev->channel->callback_event);
+
+ return 0;
+
+}
+
+static int balloon_resume(struct hv_device *dev)
+{
+ int ret;
+
+ dm_device.state = DM_INITIALIZING;
+
+ ret = balloon_connect_vsp(dev);
+
+ if (ret != 0)
+ goto out;
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ dm_device.thread = NULL;
+ goto close_channel;
+ }
+
+ dm_device.state = DM_INITIALIZED;
+ return 0;
+close_channel:
+ vmbus_close(dev->channel);
+out:
+ dm_device.state = DM_INIT_ERROR;
+ disable_page_reporting();
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
+#endif
+ return ret;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Dynamic Memory Class ID */
+ /* 525074DC-8985-46e2-8057-A307DC18A502 */
+ { HV_DM_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver balloon_drv = {
+ .name = "hv_balloon",
+ .id_table = id_table,
+ .probe = balloon_probe,
+ .remove = balloon_remove,
+ .suspend = balloon_suspend,
+ .resume = balloon_resume,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init init_balloon_drv(void)
+{
+
+ return vmbus_driver_register(&balloon_drv);
+}
+
+module_init(init_balloon_drv);
+
+MODULE_DESCRIPTION("Hyper-V Balloon");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
new file mode 100644
index 000000000..ae68298c0
--- /dev/null
+++ b/drivers/hv/hv_common.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Architecture neutral utility routines for interacting with
+ * Hyper-V. This file is specifically for code that must be
+ * built-in to the kernel image when CONFIG_HYPERV is set
+ * (vs. being in a module) because it is called from architecture
+ * specific code under arch/.
+ *
+ * Copyright (C) 2021, Microsoft, Inc.
+ *
+ * Author : Michael Kelley <mikelley@microsoft.com>
+ */
+
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/bitfield.h>
+#include <linux/cpumask.h>
+#include <linux/panic_notifier.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/dma-map-ops.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+
+/*
+ * hv_root_partition and ms_hyperv are defined here with other Hyper-V
+ * specific globals so they are shared across all architectures and are
+ * built only when CONFIG_HYPERV is defined. But on x86,
+ * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
+ * defined, and it uses these two variables. So mark them as __weak
+ * here, allowing for an overriding definition in the module containing
+ * ms_hyperv_init_platform().
+ */
+bool __weak hv_root_partition;
+EXPORT_SYMBOL_GPL(hv_root_partition);
+
+struct ms_hyperv_info __weak ms_hyperv;
+EXPORT_SYMBOL_GPL(ms_hyperv);
+
+u32 *hv_vp_index;
+EXPORT_SYMBOL_GPL(hv_vp_index);
+
+u32 hv_max_vp_index;
+EXPORT_SYMBOL_GPL(hv_max_vp_index);
+
+void * __percpu *hyperv_pcpu_input_arg;
+EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
+
+void * __percpu *hyperv_pcpu_output_arg;
+EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
+
+/*
+ * Hyper-V specific initialization and shutdown code that is
+ * common across all architectures. Called from architecture
+ * specific initialization functions.
+ */
+
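+/* Undo the allocations made by hv_common_init() */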
+void __init hv_common_free(void)
+{
+ kfree(hv_vp_index);
+ hv_vp_index = NULL;
+
+ free_percpu(hyperv_pcpu_output_arg);
+ hyperv_pcpu_output_arg = NULL;
+
+ free_percpu(hyperv_pcpu_input_arg);
+ hyperv_pcpu_input_arg = NULL;
+}
+
+int __init hv_common_init(void)
+{
+ int i;
+
+ /*
+ * Hyper-V expects to get crash register data or kmsg when
+ * crash enlightenment is available and the system crashes. Set
+ * crash_kexec_post_notifiers to true to make sure the crash
+ * enlightenment interface is called before the kdump kernel runs.
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ crash_kexec_post_notifiers = true;
+ pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");
+ }
+
+ /*
+ * Allocate the per-CPU state for the hypercall input arg.
+ * If this allocation fails, we will not be able to set up the
+ * (per-CPU) hypercall input page and thus this failure is
+ * fatal on Hyper-V.
+ */
+ hyperv_pcpu_input_arg = alloc_percpu(void *);
+ BUG_ON(!hyperv_pcpu_input_arg);
+
+ /* Allocate the per-CPU state for output arg for root */
+ if (hv_root_partition) {
+ hyperv_pcpu_output_arg = alloc_percpu(void *);
+ BUG_ON(!hyperv_pcpu_output_arg);
+ }
+
+ hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+ GFP_KERNEL);
+ if (!hv_vp_index) {
+ hv_common_free();
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ hv_vp_index[i] = VP_INVAL;
+
+ return 0;
+}
+
+/*
+ * Hyper-V specific initialization and die code for
+ * individual CPUs that is common across all architectures.
+ * Called by the CPU hotplug mechanism.
+ */
+
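+/*
+ * Allocate this CPU's hypercall input page (and an adjacent output
+ * page when running as the root partition) and record the CPU's
+ * VP index.
+ */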
+int hv_common_cpu_init(unsigned int cpu)
+{
+ void **inputarg, **outputarg;
+ u64 msr_vp_index;
+ gfp_t flags;
+ int pgcount = hv_root_partition ? 2 : 1;
+
+ /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
+ flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
+
+ inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+ if (!(*inputarg))
+ return -ENOMEM;
+
+ if (hv_root_partition) {
+ outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
+ *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+ }
+
+ msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
+
+ hv_vp_index[cpu] = msr_vp_index;
+
+ if (msr_vp_index > hv_max_vp_index)
+ hv_max_vp_index = msr_vp_index;
+
+ return 0;
+}
+
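+/*
+ * Clear this CPU's hypercall arg pointers with interrupts disabled,
+ * then free the underlying memory.
+ */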
+int hv_common_cpu_die(unsigned int cpu)
+{
+ unsigned long flags;
+ void **inputarg, **outputarg;
+ void *mem;
+
+ local_irq_save(flags);
+
+ inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ mem = *inputarg;
+ *inputarg = NULL;
+
+ if (hv_root_partition) {
+ outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
+ *outputarg = NULL;
+ }
+
+ local_irq_restore(flags);
+
+ kfree(mem);
+
+ return 0;
+}
+
+/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
+bool hv_query_ext_cap(u64 cap_query)
+{
+ /*
+ * The address of the 'hv_extended_cap' variable will be used as an
+ * output parameter to the hypercall below and so it should be
+ * compatible with 'virt_to_phys'. That means its address should be
+ * directly mapped. Use 'static' to keep it compatible; stack variables
+ * can be virtually mapped, making them incompatible with
+ * 'virt_to_phys'.
+ * Hypercall input/output addresses should also be 8-byte aligned.
+ */
+ static u64 hv_extended_cap __aligned(8);
+ static bool hv_extended_cap_queried;
+ u64 status;
+
+ /*
+ * Querying extended capabilities is an extended hypercall. Check first
+ * whether the partition supports extended hypercalls.
+ */
+ if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
+ return false;
+
+ /* Extended capabilities do not change at runtime. */
+ if (hv_extended_cap_queried)
+ return hv_extended_cap & cap_query;
+
+ status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
+ &hv_extended_cap);
+
+ /*
+ * The query extended capabilities hypercall should not fail under
+ * any normal circumstances. Avoid repeatedly making the hypercall
+ * on error.
+ */
+ hv_extended_cap_queried = true;
+ if (!hv_result_success(status)) {
+ pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
+ status);
+ return false;
+ }
+
+ return hv_extended_cap & cap_query;
+}
+EXPORT_SYMBOL_GPL(hv_query_ext_cap);
+
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+ /*
+ * Hyper-V does not offer a vIOMMU in the guest
+ * VM, so pass 0/NULL for the IOMMU settings
+ */
+ arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
+bool hv_is_hibernation_supported(void)
+{
+ return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
+}
+EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
+
+/*
+ * Default function to read the Hyper-V reference counter, independent
+ * of whether Hyper-V enlightened clocks/timers are being used. But on
+ * architectures where it is used, Hyper-V enlightenment code in
+ * hyperv_timer.c may override this function.
+ */
+static u64 __hv_read_ref_counter(void)
+{
+ return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+}
+
+u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
+
+/*
+ * These __weak functions provide default "no-op" behavior and
+ * may be overridden by architecture specific versions. Architectures
+ * for which the default "no-op" behavior is sufficient can leave
+ * them unimplemented and not be cluttered with a bunch of stub
+ * functions in arch-specific code.
+ */
+
+bool __weak hv_is_isolation_supported(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
+
+bool __weak hv_isolation_type_snp(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+
+void __weak hv_setup_vmbus_handler(void (*handler)(void))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
+
+void __weak hv_remove_vmbus_handler(void)
+{
+}
+EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
+
+void __weak hv_setup_kexec_handler(void (*handler)(void))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);
+
+void __weak hv_remove_kexec_handler(void)
+{
+}
+EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);
+
+void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_crash_handler);
+
+void __weak hv_remove_crash_handler(void)
+{
+}
+EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
+
+void __weak hyperv_cleanup(void)
+{
+}
+EXPORT_SYMBOL_GPL(hyperv_cleanup);
+
+u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
+{
+ return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+void __weak *hv_map_memory(void *addr, unsigned long size)
+{
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hv_map_memory);
+
+void __weak hv_unmap_memory(void *addr)
+{
+}
+EXPORT_SYMBOL_GPL(hv_unmap_memory);
diff --git a/drivers/hv/hv_debugfs.c b/drivers/hv/hv_debugfs.c
new file mode 100644
index 000000000..ccf752b66
--- /dev/null
+++ b/drivers/hv/hv_debugfs.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Authors:
+ * Branden Bonaby <brandonbonaby94@gmail.com>
+ */
+
+#include <linux/hyperv.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include "hyperv_vmbus.h"
+
+static struct dentry *hv_debug_root;
+
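+/*
+ * debugfs accessors for the fuzz test delay values; the delay is
+ * applied with udelay() (see hv_debug_delay_test()) and is capped
+ * at 1000.
+ */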
+static int hv_debugfs_delay_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int hv_debugfs_delay_set(void *data, u64 val)
+{
+ if (val > 1000)
+ return -EINVAL;
+ *(u32 *)data = val;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_delay_fops, hv_debugfs_delay_get,
+ hv_debugfs_delay_set, "%llu\n");
+
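+/* debugfs accessors for the per-channel fuzz test on/off state */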
+static int hv_debugfs_state_get(void *data, u64 *val)
+{
+ *val = *(bool *)data;
+ return 0;
+}
+
+static int hv_debugfs_state_set(void *data, u64 val)
+{
+ if (val == 1)
+ *(bool *)data = true;
+ else if (val == 0)
+ *(bool *)data = false;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_state_fops, hv_debugfs_state_get,
+ hv_debugfs_state_set, "%llu\n");
+
+/* Set up delay files to store test values */
+static int hv_debug_delay_files(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ char *buffer = "fuzz_test_buffer_interrupt_delay";
+ char *message = "fuzz_test_message_delay";
+ int *buffer_val = &channel->fuzz_testing_interrupt_delay;
+ int *message_val = &channel->fuzz_testing_message_delay;
+ struct dentry *buffer_file, *message_file;
+
+ buffer_file = debugfs_create_file(buffer, 0644, root,
+ buffer_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(buffer_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", buffer);
+ return PTR_ERR(buffer_file);
+ }
+
+ message_file = debugfs_create_file(message, 0644, root,
+ message_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(message_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", message);
+ return PTR_ERR(message_file);
+ }
+
+ return 0;
+}
+
+/* Set up the test state value for a vmbus device */
+static int hv_debug_set_test_state(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ bool *state = &channel->fuzz_testing_state;
+ char *status = "fuzz_test_state";
+ struct dentry *test_state;
+
+ test_state = debugfs_create_file(status, 0644, root,
+ state,
+ &hv_debugfs_state_fops);
+ if (IS_ERR(test_state)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", status);
+ return PTR_ERR(test_state);
+ }
+
+ return 0;
+}
+
+/* Bind hv device to a dentry for debugfs */
+static void hv_debug_set_dir_dentry(struct hv_device *dev, struct dentry *root)
+{
+ if (hv_debug_root)
+ dev->debug_dir = root;
+}
+
+/* Create all test dentries and names for fuzz testing */
+int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ const char *device = dev_name(&dev->device);
+ char *delay_name = "delay";
+ struct dentry *delay, *dev_root;
+ int ret;
+
+ if (!IS_ERR(hv_debug_root)) {
+ dev_root = debugfs_create_dir(device, hv_debug_root);
+ if (IS_ERR(dev_root)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/ not created\n",
+ device);
+ return PTR_ERR(dev_root);
+ }
+ hv_debug_set_test_state(dev, dev_root);
+ hv_debug_set_dir_dentry(dev, dev_root);
+ delay = debugfs_create_dir(delay_name, dev_root);
+
+ if (IS_ERR(delay)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/%s/ not created\n",
+ device, delay_name);
+ return PTR_ERR(delay);
+ }
+ ret = hv_debug_delay_files(dev, delay);
+
+ return ret;
+ }
+ pr_debug("debugfs_hyperv: hyperv/ not in root debugfs path\n");
+ return PTR_ERR(hv_debug_root);
+}
+
+/* Remove dentry associated with released hv device */
+void hv_debug_rm_dev_dir(struct hv_device *dev)
+{
+ if (!IS_ERR(hv_debug_root))
+ debugfs_remove_recursive(dev->debug_dir);
+}
+
+/* Remove all dentries associated with vmbus testing */
+void hv_debug_rm_all_dir(void)
+{
+ debugfs_remove_recursive(hv_debug_root);
+}
+
+/* Delay buffer/message reads on a vmbus channel */
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type)
+{
+ struct vmbus_channel *test_channel = channel->primary_channel ?
+ channel->primary_channel :
+ channel;
+ bool state = test_channel->fuzz_testing_state;
+
+ if (state) {
+ if (delay_type == 0)
+ udelay(test_channel->fuzz_testing_interrupt_delay);
+ else
+ udelay(test_channel->fuzz_testing_message_delay);
+ }
+}
+
+/* Initialize top dentry for vmbus testing */
+int hv_debug_init(void)
+{
+ hv_debug_root = debugfs_create_dir("hyperv", NULL);
+ if (IS_ERR(hv_debug_root)) {
+ pr_debug("debugfs_hyperv: hyperv/ not created\n");
+ return PTR_ERR(hv_debug_root);
+ }
+ return 0;
+}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
new file mode 100644
index 000000000..922d83eb7
--- /dev/null
+++ b/drivers/hv/hv_fcopy.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * An implementation of file copy service.
+ *
+ * Copyright (C) 2014, Microsoft, Inc.
+ *
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/nls.h>
+#include <linux/workqueue.h>
+#include <linux/hyperv.h>
+#include <linux/sched.h>
+#include <asm/hyperv-tlfs.h>
+
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
+#define WIN8_SRV_MAJOR 1
+#define WIN8_SRV_MINOR 1
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+
+#define FCOPY_VER_COUNT 1
+static const int fcopy_versions[] = {
+ WIN8_SRV_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+ UTIL_FW_VERSION
+};
+
+/*
+ * Global state maintained for transaction that is being processed.
+ * For a class of integration services, including the "file copy service",
+ * the specified protocol is a "request/response" protocol which means that
+ * there can only be a single outstanding transaction from the host at any
+ * given point in time. We use this to simplify memory management in this
+ * driver - we cache and process only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBUS until the current packet is fully
+ * handled.
+ */
+
+static struct {
+ int state; /* hvutil_device_state */
+ int recv_len; /* number of bytes received. */
+ struct hv_fcopy_hdr *fcopy_msg; /* current message */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+} fcopy_transaction;
+
+static void fcopy_respond_to_host(int error);
+static void fcopy_send_data(struct work_struct *dummy);
+static void fcopy_timeout_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(fcopy_timeout_work, fcopy_timeout_func);
+static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
+static const char fcopy_devname[] = "vmbus/hv_fcopy";
+static u8 *recv_buffer;
+static struct hvutil_transport *hvt;
+/*
+ * This state maintains the version number registered by the daemon.
+ */
+static int dm_reg_value;
+
+static void fcopy_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ fcopy_transaction.state = HVUTIL_READY;
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
+}
+
+static void fcopy_timeout_func(struct work_struct *dummy)
+{
+ /*
+ * If the timer fires, the user-mode component has not responded;
+ * process the pending transaction.
+ */
+ fcopy_respond_to_host(HV_E_FAIL);
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+}
+
+static void fcopy_register_done(void)
+{
+ pr_debug("FCP: userspace daemon registered\n");
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+}
+
+static int fcopy_handle_handshake(u32 version)
+{
+ u32 our_ver = FCOPY_CURRENT_VERSION;
+
+ switch (version) {
+ case FCOPY_VERSION_0:
+ /* Daemon doesn't expect us to reply */
+ dm_reg_value = version;
+ break;
+ case FCOPY_VERSION_1:
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ fcopy_register_done))
+ return -EFAULT;
+ dm_reg_value = version;
+ break;
+ default:
+ /*
+ * For now we will fail the registration.
+ * If and when we have multiple versions to
+ * deal with, we will be backward compatible.
+ * We will add this code when needed.
+ */
+ return -EINVAL;
+ }
+ pr_debug("FCP: userspace daemon ver. %d connected\n", version);
+ return 0;
+}
+
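+/*
+ * Forward the current host request to the user space daemon. For
+ * START_FILE_COPY the utf16 file and path names are converted to
+ * utf8 first; other operations are passed through as received.
+ */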
+static void fcopy_send_data(struct work_struct *dummy)
+{
+ struct hv_start_fcopy *smsg_out = NULL;
+ int operation = fcopy_transaction.fcopy_msg->operation;
+ struct hv_start_fcopy *smsg_in;
+ void *out_src;
+ int rc, out_len;
+
+ /*
+ * The strings sent from the host are encoded in
+ * utf16; convert them to utf8 strings.
+ * The host assures us that the utf16 strings will not exceed
+ * the max lengths specified. We will, however, reserve room
+ * for the string terminating character - in the utf16s_to_utf8s()
+ * function we limit the size of the buffer where the converted
+ * string is placed to W_MAX_PATH - 1 to guarantee
+ * that the strings can be properly terminated!
+ */
+
+ switch (operation) {
+ case START_FILE_COPY:
+ out_len = sizeof(struct hv_start_fcopy);
+ smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
+ if (!smsg_out)
+ return;
+
+ smsg_out->hdr.operation = operation;
+ smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
+
+ smsg_out->copy_flags = smsg_in->copy_flags;
+ smsg_out->file_size = smsg_in->file_size;
+ out_src = smsg_out;
+ break;
+
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
+ default:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = fcopy_transaction.recv_len;
+ break;
+ }
+
+ fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
+ rc = hvutil_transport_send(hvt, out_src, out_len, NULL);
+ if (rc) {
+ pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
+ fcopy_respond_to_host(HV_E_FAIL);
+ fcopy_transaction.state = HVUTIL_READY;
+ }
+ }
+ kfree(smsg_out);
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+fcopy_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdr;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+ * Copy the global state for completing the transaction. Note that
+ * only one transaction can be active at a time. This is guaranteed
+ * by the file copy protocol implemented by the host. Furthermore,
+ * the "transaction active" state we maintain ensures that there can
+ * only be one active transaction at a time.
+ */
+
+ buf_len = fcopy_transaction.recv_len;
+ channel = fcopy_transaction.recv_channel;
+ req_id = fcopy_transaction.recv_req_id;
+
+ icmsghdr = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with the util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdr->status = error;
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+void hv_fcopy_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_fcopy_hdr *fcopy_msg;
+ struct icmsg_hdr *icmsghdr;
+ int fcopy_srv_version;
+
+ if (fcopy_transaction.state > HVUTIL_READY)
+ return;
+
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ pr_err_ratelimited("Fcopy request received. Could not read into recv buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Fcopy request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdr = (struct icmsg_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdr,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ fcopy_versions, FCOPY_VER_COUNT,
+ NULL, &fcopy_srv_version)) {
+
+ pr_info("FCopy IC version %d.%d\n",
+ fcopy_srv_version >> 16,
+ fcopy_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdr->icmsgtype == ICMSGTYPE_FCOPY) {
+ /* Ensure recvlen is big enough to contain hv_fcopy_hdr */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_fcopy_hdr)) {
+ pr_err_ratelimited("Invalid Fcopy hdr. Packet length too small: %u\n",
+ recvlen);
+ return;
+ }
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ICMSG_HDR];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ fcopy_transaction.recv_len = recvlen;
+ fcopy_transaction.recv_req_id = requestid;
+ fcopy_transaction.fcopy_msg = fcopy_msg;
+
+ if (fcopy_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ fcopy_respond_to_host(HV_E_FAIL);
+ return;
+ }
+ fcopy_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+
+ /*
+ * Send the information to the user-level daemon.
+ */
+ schedule_work(&fcopy_send_work);
+ schedule_delayed_work(&fcopy_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
+ return;
+ } else {
+ pr_err_ratelimited("Fcopy request received. Invalid msg type: %d\n",
+ icmsghdr->icmsgtype);
+ return;
+ }
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/* Callback when data is received from userspace */
+static int fcopy_on_msg(void *msg, int len)
+{
+ int *val = (int *)msg;
+
+ if (len != sizeof(int))
+ return -EINVAL;
+
+ if (fcopy_transaction.state == HVUTIL_DEVICE_INIT)
+ return fcopy_handle_handshake(*val);
+
+ if (fcopy_transaction.state != HVUTIL_USERSPACE_REQ)
+ return -EINVAL;
+
+ /*
+ * Complete the transaction by forwarding the result
+ * to the host. But first, cancel the timeout.
+ */
+ if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
+ fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
+ fcopy_respond_to_host(*val);
+ hv_poll_channel(fcopy_transaction.recv_channel,
+ fcopy_poll_wrapper);
+ }
+
+ return 0;
+}
+
+static void fcopy_on_reset(void)
+{
+ /*
+ * The daemon has exited; reset the state.
+ */
+ fcopy_transaction.state = HVUTIL_DEVICE_INIT;
+
+ if (cancel_delayed_work_sync(&fcopy_timeout_work))
+ fcopy_respond_to_host(HV_E_FAIL);
+}
+
+int hv_fcopy_init(struct hv_util_service *srv)
+{
+ recv_buffer = srv->recv_buffer;
+ fcopy_transaction.recv_channel = srv->channel;
+ fcopy_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ fcopy_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(fcopy_devname, 0, 0,
+ fcopy_on_msg, fcopy_on_reset);
+ if (!hvt)
+ return -EFAULT;
+
+ return 0;
+}
+
+static void hv_fcopy_cancel_work(void)
+{
+ cancel_delayed_work_sync(&fcopy_timeout_work);
+ cancel_work_sync(&fcopy_send_work);
+}
+
+int hv_fcopy_pre_suspend(void)
+{
+ struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+ struct hv_fcopy_hdr *fcopy_msg;
+
+ /*
+ * Fake a CANCEL_FCOPY message for the user space daemon in case the
+ * daemon is in the middle of copying some file. It doesn't matter if
+ * there is already a message pending to be delivered to the user
+ * space since we force fcopy_transaction.state to be HVUTIL_READY, so
+ * the user space daemon's write() will fail with EINVAL (see
+ * fcopy_on_msg()), and the daemon will reset the device by closing
+ * and re-opening it.
+ */
+ fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
+ if (!fcopy_msg)
+ return -ENOMEM;
+
+ tasklet_disable(&channel->callback_event);
+
+ fcopy_msg->operation = CANCEL_FCOPY;
+
+ hv_fcopy_cancel_work();
+
+ /* We don't care about the return value. */
+ hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);
+
+ kfree(fcopy_msg);
+
+ fcopy_transaction.state = HVUTIL_READY;
+
+ /* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
+ return 0;
+}
+
+int hv_fcopy_pre_resume(void)
+{
+ struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
+void hv_fcopy_deinit(void)
+{
+ fcopy_transaction.state = HVUTIL_DEVICE_DYING;
+
+ hv_fcopy_cancel_work();
+
+ hvutil_transport_destroy(hvt);
+}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
new file mode 100644
index 000000000..d35b60c06
--- /dev/null
+++ b/drivers/hv/hv_kvp.c
@@ -0,0 +1,824 @@
+/*
+ * An implementation of key value pair (KVP) functionality for Linux.
+ *
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/net.h>
+#include <linux/nls.h>
+#include <linux/connector.h>
+#include <linux/workqueue.h>
+#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
+/*
+ * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
+ */
+#define WS2008_SRV_MAJOR 1
+#define WS2008_SRV_MINOR 0
+#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
+
+#define WIN7_SRV_MAJOR 3
+#define WIN7_SRV_MINOR 0
+#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+
+#define WIN8_SRV_MAJOR 4
+#define WIN8_SRV_MINOR 0
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+
+#define KVP_VER_COUNT 3
+static const int kvp_versions[] = {
+ WIN8_SRV_VERSION,
+ WIN7_SRV_VERSION,
+ WS2008_SRV_VERSION
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+ UTIL_FW_VERSION,
+ UTIL_WS2K8_FW_VERSION
+};
+
+/*
+ * Global state maintained for transaction that is being processed. For a class
+ * of integration services, including the "KVP service", the specified protocol
+ * is a "request/response" protocol which means that there can only be a single
+ * outstanding transaction from the host at any given point in time. We use
+ * this to simplify memory management in this driver - we cache and process
+ * only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBUS until the current packet is fully
+ * handled.
+ */
+
+static struct {
+ int state; /* hvutil_device_state */
+ int recv_len; /* number of bytes received. */
+ struct hv_kvp_msg *kvp_msg; /* current message */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+} kvp_transaction;
+
+/*
+ * This state maintains the version number registered by the daemon.
+ */
+static int dm_reg_value;
+
+static void kvp_send_key(struct work_struct *dummy);
+
+
+static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
+static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
+static void kvp_register(int);
+
+static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
+static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
+
+static const char kvp_devname[] = "vmbus/hv_kvp";
+static u8 *recv_buffer;
+static struct hvutil_transport *hvt;
+/*
+ * Register the kernel component with the user-level daemon.
+ * As part of this registration, pass the LIC version number.
+ * This number has no meaning; it satisfies the registration protocol.
+ */
+#define HV_DRV_VERSION "3.1"
+
+static void kvp_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ kvp_transaction.state = HVUTIL_READY;
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
+}
+
+static void kvp_register_done(void)
+{
+ /*
+ * If we're still negotiating with the host, cancel the timeout
+ * work so we don't poll the channel twice.
+ */
+ pr_debug("KVP: userspace daemon registered\n");
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+}
+
+static void
+kvp_register(int reg_value)
+{
+ struct hv_kvp_msg *kvp_msg;
+ char *version;
+
+ kvp_msg = kzalloc(sizeof(*kvp_msg), GFP_KERNEL);
+
+ if (kvp_msg) {
+ version = kvp_msg->body.kvp_register.version;
+ kvp_msg->kvp_hdr.operation = reg_value;
+ strcpy(version, HV_DRV_VERSION);
+
+ hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg),
+ kvp_register_done);
+ kfree(kvp_msg);
+ }
+}
+
+static void kvp_timeout_func(struct work_struct *dummy)
+{
+ /*
+ * If the timer fires, the user-mode component has not responded;
+ * process the pending transaction.
+ */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+}
+
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+ tasklet_schedule(&kvp_transaction.recv_channel->callback_event);
+}
+
+static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+{
+ switch (msg->kvp_hdr.operation) {
+ case KVP_OP_REGISTER:
+ dm_reg_value = KVP_OP_REGISTER;
+ pr_info("KVP: IP injection functionality not available\n");
+ pr_info("KVP: Upgrade the KVP daemon\n");
+ break;
+ case KVP_OP_REGISTER1:
+ dm_reg_value = KVP_OP_REGISTER1;
+ break;
+ default:
+ pr_info("KVP: incompatible daemon\n");
+ pr_info("KVP: KVP version: %d, Daemon version: %d\n",
+ KVP_OP_REGISTER1, msg->kvp_hdr.operation);
+ return -EINVAL;
+ }
+
+ /*
+ * We have a compatible daemon; complete the handshake.
+ */
+ pr_debug("KVP: userspace daemon ver. %d connected\n",
+ msg->kvp_hdr.operation);
+ kvp_register(dm_reg_value);
+
+ return 0;
+}
+
+
+/*
+ * Callback when data is received from user mode.
+ */
+
+static int kvp_on_msg(void *msg, int len)
+{
+ struct hv_kvp_msg *message = (struct hv_kvp_msg *)msg;
+ struct hv_kvp_msg_enumerate *data;
+ int error = 0;
+
+ if (len < sizeof(*message))
+ return -EINVAL;
+
+ /*
+ * If we are negotiating the version information
+ * with the daemon, handle that first.
+ */
+
+ if (kvp_transaction.state < HVUTIL_READY) {
+ return kvp_handle_handshake(message);
+ }
+
+ /* We didn't send anything to userspace so the reply is spurious */
+ if (kvp_transaction.state < HVUTIL_USERSPACE_REQ)
+ return -EINVAL;
+
+ kvp_transaction.state = HVUTIL_USERSPACE_RECV;
+
+ /*
+ * Based on the version of the daemon, we propagate errors from the
+ * daemon differently.
+ */
+
+ data = &message->body.kvp_enum_data;
+
+ switch (dm_reg_value) {
+ case KVP_OP_REGISTER:
+ /*
+ * A null string is used to pass back the error condition.
+ */
+ if (data->data.key[0] == 0)
+ error = HV_S_CONT;
+ break;
+
+ case KVP_OP_REGISTER1:
+ /*
+ * We use the message header information from
+ * the user level daemon to transmit errors.
+ */
+ error = message->error;
+ break;
+ }
+
+ /*
+ * Complete the transaction by forwarding the key value
+ * to the host. But first, cancel the timeout.
+ */
+ if (cancel_delayed_work_sync(&kvp_timeout_work)) {
+ kvp_respond_to_host(message, error);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ }
+
+ return 0;
+}
+
+
+static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
+{
+ struct hv_kvp_msg *in = in_msg;
+ struct hv_kvp_ip_msg *out = out_msg;
+ int len;
+
+ switch (op) {
+ case KVP_OP_GET_IP_INFO:
+ /*
+ * Transform all parameters into utf16 encoding.
+ */
+ len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.ip_addr,
+ strlen((char *)in->body.kvp_ip_val.ip_addr),
+ UTF16_HOST_ENDIAN,
+ (wchar_t *)out->kvp_ip_val.ip_addr,
+ MAX_IP_ADDR_SIZE);
+ if (len < 0)
+ return len;
+
+ len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.sub_net,
+ strlen((char *)in->body.kvp_ip_val.sub_net),
+ UTF16_HOST_ENDIAN,
+ (wchar_t *)out->kvp_ip_val.sub_net,
+ MAX_IP_ADDR_SIZE);
+ if (len < 0)
+ return len;
+
+ len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.gate_way,
+ strlen((char *)in->body.kvp_ip_val.gate_way),
+ UTF16_HOST_ENDIAN,
+ (wchar_t *)out->kvp_ip_val.gate_way,
+ MAX_GATEWAY_SIZE);
+ if (len < 0)
+ return len;
+
+ len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.dns_addr,
+ strlen((char *)in->body.kvp_ip_val.dns_addr),
+ UTF16_HOST_ENDIAN,
+ (wchar_t *)out->kvp_ip_val.dns_addr,
+ MAX_IP_ADDR_SIZE);
+ if (len < 0)
+ return len;
+
+ len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.adapter_id,
+ strlen((char *)in->body.kvp_ip_val.adapter_id),
+ UTF16_HOST_ENDIAN,
+ (wchar_t *)out->kvp_ip_val.adapter_id,
+ MAX_ADAPTER_ID_SIZE);
+ if (len < 0)
+ return len;
+
+ out->kvp_ip_val.dhcp_enabled =
+ in->body.kvp_ip_val.dhcp_enabled;
+ out->kvp_ip_val.addr_family =
+ in->body.kvp_ip_val.addr_family;
+ }
+
+ return 0;
+}
+
+static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
+{
+ struct hv_kvp_ip_msg *in = in_msg;
+ struct hv_kvp_msg *out = out_msg;
+
+ switch (op) {
+ case KVP_OP_SET_IP_INFO:
+ /*
+ * Transform all parameters into utf8 encoding.
+ */
+ utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.ip_addr,
+ MAX_IP_ADDR_SIZE,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)out->body.kvp_ip_val.ip_addr,
+ MAX_IP_ADDR_SIZE);
+
+ utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.sub_net,
+ MAX_IP_ADDR_SIZE,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)out->body.kvp_ip_val.sub_net,
+ MAX_IP_ADDR_SIZE);
+
+ utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.gate_way,
+ MAX_GATEWAY_SIZE,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)out->body.kvp_ip_val.gate_way,
+ MAX_GATEWAY_SIZE);
+
+ utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.dns_addr,
+ MAX_IP_ADDR_SIZE,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)out->body.kvp_ip_val.dns_addr,
+ MAX_IP_ADDR_SIZE);
+
+ out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
+
+ fallthrough;
+
+ case KVP_OP_GET_IP_INFO:
+ utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
+ MAX_ADAPTER_ID_SIZE,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)out->body.kvp_ip_val.adapter_id,
+ MAX_ADAPTER_ID_SIZE);
+
+ out->body.kvp_ip_val.addr_family = in->kvp_ip_val.addr_family;
+ }
+}
+
+
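+/*
+ * Forward the host's KVP request to the user space daemon, converting
+ * utf16 keys/values to utf8 and scalar values to decimal strings as
+ * needed for the particular operation.
+ */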
+static void
+kvp_send_key(struct work_struct *dummy)
+{
+ struct hv_kvp_msg *message;
+ struct hv_kvp_msg *in_msg;
+ __u8 operation = kvp_transaction.kvp_msg->kvp_hdr.operation;
+ __u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool;
+ __u32 val32;
+ __u64 val64;
+ int rc;
+
+ /* The transaction state is wrong. */
+ if (kvp_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+ return;
+
+ message = kzalloc(sizeof(*message), GFP_KERNEL);
+ if (!message)
+ return;
+
+ message->kvp_hdr.operation = operation;
+ message->kvp_hdr.pool = pool;
+ in_msg = kvp_transaction.kvp_msg;
+
+ /*
+ * The key/value strings sent from the host are encoded
+ * in utf16; convert them to utf8 strings.
+ * The host assures us that the utf16 strings will not exceed
+ * the max lengths specified. We will, however, reserve room
+ * for the string terminating character - in the utf16s_to_utf8s()
+ * function we limit the size of the buffer where the converted
+ * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE - 1 to guarantee
+ * that the strings can be properly terminated!
+ */
+
+ switch (message->kvp_hdr.operation) {
+ case KVP_OP_SET_IP_INFO:
+ process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
+ break;
+ case KVP_OP_GET_IP_INFO:
+ /*
+ * We only need to pass on the operation, adapter_id and
+ * addr_family info to the userland kvp daemon.
+ */
+ process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
+ break;
+ case KVP_OP_SET:
+ switch (in_msg->body.kvp_set.data.value_type) {
+ case REG_SZ:
+ /*
+ * The value is a string - utf16 encoding.
+ */
+ message->body.kvp_set.data.value_size =
+ utf16s_to_utf8s(
+ (wchar_t *)in_msg->body.kvp_set.data.value,
+ in_msg->body.kvp_set.data.value_size,
+ UTF16_LITTLE_ENDIAN,
+ message->body.kvp_set.data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
+ break;
+
+ case REG_U32:
+ /*
+ * The value is a 32 bit scalar.
+ * We save this as a utf8 string.
+ */
+ val32 = in_msg->body.kvp_set.data.value_u32;
+ message->body.kvp_set.data.value_size =
+ sprintf(message->body.kvp_set.data.value,
+ "%u", val32) + 1;
+ break;
+
+ case REG_U64:
+ /*
+ * The value is a 64 bit scalar.
+ * We save this as a utf8 string.
+ */
+ val64 = in_msg->body.kvp_set.data.value_u64;
+ message->body.kvp_set.data.value_size =
+ sprintf(message->body.kvp_set.data.value,
+ "%llu", val64) + 1;
+ break;
+
+ }
+
+ /*
+ * The key is always a string - utf16 encoding.
+ */
+ message->body.kvp_set.data.key_size =
+ utf16s_to_utf8s(
+ (wchar_t *)in_msg->body.kvp_set.data.key,
+ in_msg->body.kvp_set.data.key_size,
+ UTF16_LITTLE_ENDIAN,
+ message->body.kvp_set.data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+
+ break;
+
+ case KVP_OP_GET:
+ message->body.kvp_get.data.key_size =
+ utf16s_to_utf8s(
+ (wchar_t *)in_msg->body.kvp_get.data.key,
+ in_msg->body.kvp_get.data.key_size,
+ UTF16_LITTLE_ENDIAN,
+ message->body.kvp_get.data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+ break;
+
+ case KVP_OP_DELETE:
+ message->body.kvp_delete.key_size =
+ utf16s_to_utf8s(
+ (wchar_t *)in_msg->body.kvp_delete.key,
+ in_msg->body.kvp_delete.key_size,
+ UTF16_LITTLE_ENDIAN,
+ message->body.kvp_delete.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+ break;
+
+ case KVP_OP_ENUMERATE:
+ message->body.kvp_enum_data.index =
+ in_msg->body.kvp_enum_data.index;
+ break;
+ }
+
+ kvp_transaction.state = HVUTIL_USERSPACE_REQ;
+ rc = hvutil_transport_send(hvt, message, sizeof(*message), NULL);
+ if (rc) {
+ pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&kvp_timeout_work)) {
+ kvp_respond_to_host(message, HV_E_FAIL);
+ kvp_transaction.state = HVUTIL_READY;
+ }
+ }
+
+ kfree(message);
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
+{
+ struct hv_kvp_msg *kvp_msg;
+ struct hv_kvp_exchg_msg_value *kvp_data;
+ char *key_name;
+ char *value;
+ struct icmsg_hdr *icmsghdrp;
+ int keylen = 0;
+ int valuelen = 0;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+ int ret;
+
+ /*
+ * Copy the global state for completing the transaction. Note that
+ * only one transaction can be active at a time.
+ */
+
+ buf_len = kvp_transaction.recv_len;
+ channel = kvp_transaction.recv_channel;
+ req_id = kvp_transaction.recv_req_id;
+
+ icmsghdrp = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with the util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdrp->status = error;
+
+ /*
+ * If the error parameter is set, terminate the host's enumeration
+ * on this pool.
+ */
+ if (error) {
+ /*
+ * Something failed or we have timed out;
+ * terminate the current host-side iteration.
+ */
+ goto response_done;
+ }
+
+ kvp_msg = (struct hv_kvp_msg *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ switch (kvp_transaction.kvp_msg->kvp_hdr.operation) {
+ case KVP_OP_GET_IP_INFO:
+ ret = process_ob_ipinfo(msg_to_host,
+ (struct hv_kvp_ip_msg *)kvp_msg,
+ KVP_OP_GET_IP_INFO);
+ if (ret < 0)
+ icmsghdrp->status = HV_E_FAIL;
+
+ goto response_done;
+ case KVP_OP_SET_IP_INFO:
+ goto response_done;
+ case KVP_OP_GET:
+ kvp_data = &kvp_msg->body.kvp_get.data;
+ goto copy_value;
+
+ case KVP_OP_SET:
+ case KVP_OP_DELETE:
+ goto response_done;
+
+ default:
+ break;
+ }
+
+ kvp_data = &kvp_msg->body.kvp_enum_data.data;
+ key_name = msg_to_host->body.kvp_enum_data.data.key;
+
+ /*
+ * The Windows host expects the key/value pair to be encoded
+ * in utf16. Ensure that the key/value size reported to the host
+ * will be less than or equal to the MAX size (including the
+ * terminating character).
+ */
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->key,
+ (HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2) - 2);
+ kvp_data->key_size = 2*(keylen + 1); /* utf16 encoding */
+
+copy_value:
+ value = msg_to_host->body.kvp_enum_data.data.value;
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->value,
+ (HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2);
+ kvp_data->value_size = 2*(valuelen + 1); /* utf16 encoding */
+
+ /*
+ * If the utf8s to utf16s conversion failed, notify the host
+ * of the error.
+ */
+ if ((keylen < 0) || (valuelen < 0))
+ icmsghdrp->status = HV_E_FAIL;
+
+ kvp_data->value_type = REG_SZ; /* all our values are strings */
+
+response_done:
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/*
+ * This callback is invoked when we get a KVP message from the host.
+ * The host ensures that only one KVP transaction can be active at a time.
+ * The KVP implementation in Linux needs to forward the key to a user-mode
+ * component to retrieve the corresponding value. Consequently, we cannot
+ * respond to the host in the context of this callback. Since the host
+ * guarantees that at most one transaction can be active at a time,
+ * we stash away the transaction state in a set of global variables.
+ */
+
+void hv_kvp_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+
+ struct hv_kvp_msg *kvp_msg;
+
+ struct icmsg_hdr *icmsghdrp;
+ int kvp_srv_version;
+ static enum {NEGO_NOT_STARTED,
+ NEGO_IN_PROGRESS,
+ NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
+
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /*
+ * If the userspace daemon is not connected and the host is asking
+ * us to negotiate, we need to delay so as not to lose messages.
+ * This is important for the Failover IP setting.
+ */
+ if (host_negotiatied == NEGO_NOT_STARTED) {
+ host_negotiatied = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
+ HV_UTIL_NEGO_TIMEOUT * HZ);
+ }
+ return;
+ }
+ if (kvp_transaction.state > HVUTIL_READY)
+ return;
+
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen, &requestid)) {
+ pr_err_ratelimited("KVP request received. Could not read into recv buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("KVP request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ kvp_versions, KVP_VER_COUNT,
+ NULL, &kvp_srv_version)) {
+ pr_info("KVP IC version %d.%d\n",
+ kvp_srv_version >> 16,
+ kvp_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_KVPEXCHANGE) {
+ /*
+ * recvlen is not checked against sizeof(struct hv_kvp_msg) because
+ * hv_kvp_msg contains a union of structs and the msg type received is
+ * not known. Code using this struct should provide validation when
+ * accessing its fields.
+ */
+ kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ICMSG_HDR];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ kvp_transaction.recv_len = recvlen;
+ kvp_transaction.recv_req_id = requestid;
+ kvp_transaction.kvp_msg = kvp_msg;
+
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ return;
+ }
+ kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+
+ /*
+ * Get the information from the user-mode component.
+ * This transaction will be completed when we get the
+ * value from the user-mode component.
+ * Set a timeout to deal with user-mode not responding.
+ */
+ schedule_work(&kvp_sendkey_work);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
+
+ return;
+
+ } else {
+ pr_err_ratelimited("KVP request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
+ host_negotiatied = NEGO_FINISHED;
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+}
+
+static void kvp_on_reset(void)
+{
+ if (cancel_delayed_work_sync(&kvp_timeout_work))
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ kvp_transaction.state = HVUTIL_DEVICE_INIT;
+}
+
+int
+hv_kvp_init(struct hv_util_service *srv)
+{
+ recv_buffer = srv->recv_buffer;
+ kvp_transaction.recv_channel = srv->channel;
+ kvp_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 4;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ kvp_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
+ kvp_on_msg, kvp_on_reset);
+ if (!hvt)
+ return -EFAULT;
+
+ return 0;
+}
+
+static void hv_kvp_cancel_work(void)
+{
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ cancel_delayed_work_sync(&kvp_timeout_work);
+ cancel_work_sync(&kvp_sendkey_work);
+}
+
+int hv_kvp_pre_suspend(void)
+{
+ struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+ tasklet_disable(&channel->callback_event);
+
+ /*
+ * If there is a pending transaction, it's unnecessary to tell the host
+ * that the transaction will fail, because that is implied when
+ * util_suspend() calls vmbus_close() later.
+ */
+ hv_kvp_cancel_work();
+
+ /*
+ * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
+ * later. The user space daemon may go out of order and its write()
+ * may fail with EINVAL: this doesn't matter since the daemon will
+ * reset the device by closing and re-opening it.
+ */
+ kvp_transaction.state = HVUTIL_READY;
+ return 0;
+}
+
+int hv_kvp_pre_resume(void)
+{
+ struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
+void hv_kvp_deinit(void)
+{
+ kvp_transaction.state = HVUTIL_DEVICE_DYING;
+
+ hv_kvp_cancel_work();
+
+ hvutil_transport_destroy(hvt);
+}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
new file mode 100644
index 000000000..0d2184be1
--- /dev/null
+++ b/drivers/hv/hv_snapshot.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * An implementation of host initiated guest snapshot.
+ *
+ * Copyright (C) 2013, Microsoft, Inc.
+ * Author : K. Y. Srinivasan <kys@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/net.h>
+#include <linux/nls.h>
+#include <linux/connector.h>
+#include <linux/workqueue.h>
+#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
+#define VSS_MAJOR 5
+#define VSS_MINOR 0
+#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
+
+#define VSS_VER_COUNT 1
+static const int vss_versions[] = {
+ VSS_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+ UTIL_FW_VERSION
+};
+
+/* See comment with struct hv_vss_msg regarding the max VMbus packet size */
+#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)
+
+/*
+ * Timeout values are based on expectations from the host.
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
+
+/*
+ * Global state maintained for transaction that is being processed. For a class
+ * of integration services, including the "VSS service", the specified protocol
+ * is a "request/response" protocol which means that there can only be a single
+ * outstanding transaction from the host at any given point in time. We use
+ * this to simplify memory management in this driver - we cache and process
+ * only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBUS until the current packet is fully
+ * handled.
+ */
+
+static struct {
+ int state; /* hvutil_device_state */
+ int recv_len; /* number of bytes received. */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ struct hv_vss_msg *msg; /* current message */
+} vss_transaction;
+
+
+static void vss_respond_to_host(int error);
+
+/*
+ * This state maintains the version number registered by the daemon.
+ */
+static int dm_reg_value;
+
+static const char vss_devname[] = "vmbus/hv_vss";
+static __u8 *recv_buffer;
+static struct hvutil_transport *hvt;
+
+static void vss_timeout_func(struct work_struct *dummy);
+static void vss_handle_request(struct work_struct *dummy);
+
+static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
+static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
+
+static void vss_poll_wrapper(void *channel)
+{
+ /* Transaction is finished, reset the state here to avoid races. */
+ vss_transaction.state = HVUTIL_READY;
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
+}
+
+/*
+ * Timeout handler: the userspace component has not replied in time;
+ * fail the pending transaction.
+ */
+static void vss_timeout_func(struct work_struct *dummy)
+{
+ pr_warn("VSS: timeout waiting for daemon to reply\n");
+ vss_respond_to_host(HV_E_FAIL);
+
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+}
+
+static void vss_register_done(void)
+{
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+ pr_debug("VSS: userspace daemon registered\n");
+}
+
+static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
+{
+ u32 our_ver = VSS_OP_REGISTER1;
+
+ switch (vss_msg->vss_hdr.operation) {
+ case VSS_OP_REGISTER:
+ /* Daemon doesn't expect us to reply */
+ dm_reg_value = VSS_OP_REGISTER;
+ break;
+ case VSS_OP_REGISTER1:
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ vss_register_done))
+ return -EFAULT;
+ dm_reg_value = VSS_OP_REGISTER1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+ return 0;
+}
+
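+/*
+ * Callback when data is received from user mode.
+ */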
+static int vss_on_msg(void *msg, int len)
+{
+ struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
+
+ if (len != sizeof(*vss_msg)) {
+ pr_debug("VSS: Message size does not match length\n");
+ return -EINVAL;
+ }
+
+ if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
+ vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
+ /*
+ * Don't process registration messages if we're in the middle
+ * of transaction processing.
+ */
+ if (vss_transaction.state > HVUTIL_READY) {
+ pr_debug("VSS: Got unexpected registration request\n");
+ return -EINVAL;
+ }
+
+ return vss_handle_handshake(vss_msg);
+ } else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
+ vss_transaction.state = HVUTIL_USERSPACE_RECV;
+
+ if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
+ vss_transaction.msg->vss_cf.flags =
+ VSS_HBU_NO_AUTO_RECOVERY;
+
+ if (cancel_delayed_work_sync(&vss_timeout_work)) {
+ vss_respond_to_host(vss_msg->error);
+ /* Transaction is finished, reset the state. */
+ hv_poll_channel(vss_transaction.recv_channel,
+ vss_poll_wrapper);
+ }
+ } else {
+ /* This is a spurious call! */
+ pr_debug("VSS: Transaction not active\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
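+/*
+ * Forward the current freeze/thaw/hot-backup request to the user
+ * space daemon and arm a timeout in case the daemon does not reply.
+ */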
+static void vss_send_op(void)
+{
+ int op = vss_transaction.msg->vss_hdr.operation;
+ int rc;
+ struct hv_vss_msg *vss_msg;
+
+ /* The transaction state is wrong. */
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+ pr_debug("VSS: Unexpected attempt to send to daemon\n");
+ return;
+ }
+
+ vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
+ if (!vss_msg)
+ return;
+
+ vss_msg->vss_hdr.operation = op;
+
+ vss_transaction.state = HVUTIL_USERSPACE_REQ;
+
+ schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+ VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
+
+ rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
+ if (rc) {
+ pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&vss_timeout_work)) {
+ vss_respond_to_host(HV_E_FAIL);
+ vss_transaction.state = HVUTIL_READY;
+ }
+ }
+
+ kfree(vss_msg);
+}
+
+static void vss_handle_request(struct work_struct *dummy)
+{
+ switch (vss_transaction.msg->vss_hdr.operation) {
+ /*
+ * Initiate a "freeze/thaw" operation in the guest.
+ * We respond to the host once the operation is complete.
+ *
+ * We send the message to the user space daemon and the operation is
+ * performed in the daemon.
+ */
+ case VSS_OP_THAW:
+ case VSS_OP_FREEZE:
+ case VSS_OP_HOT_BACKUP:
+ if (vss_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ pr_debug("VSS: Not ready for request.\n");
+ vss_respond_to_host(HV_E_FAIL);
+ return;
+ }
+
+ pr_debug("VSS: Received request for op code: %d\n",
+ vss_transaction.msg->vss_hdr.operation);
+ vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+ vss_send_op();
+ return;
+ case VSS_OP_GET_DM_INFO:
+ vss_transaction.msg->dm_info.flags = 0;
+ break;
+ default:
+ break;
+ }
+
+ vss_respond_to_host(0);
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+vss_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdrp;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+ * Copy the global state for completing the transaction. Note that
+ * only one transaction can be active at a time.
+ */
+
+ buf_len = vss_transaction.recv_len;
+ channel = vss_transaction.recv_channel;
+ req_id = vss_transaction.recv_req_id;
+
+ icmsghdrp = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with the util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdrp->status = error;
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/*
+ * This callback is invoked when we get a VSS message from the host.
+ * The host ensures that only one VSS transaction can be active at a time.
+ */
+
+void hv_vss_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_vss_msg *vss_msg;
+ int vss_srv_version;
+
+ struct icmsg_hdr *icmsghdrp;
+
+ if (vss_transaction.state > HVUTIL_READY)
+ return;
+
+ if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) {
+ pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ vss_versions, VSS_VER_COUNT,
+ NULL, &vss_srv_version)) {
+
+ pr_info("VSS IC version %d.%d\n",
+ vss_srv_version >> 16,
+ vss_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
+ /* Ensure recvlen is big enough to contain hv_vss_msg */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
+ pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
+ recvlen);
+ return;
+ }
+ vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ vss_transaction.recv_len = recvlen;
+ vss_transaction.recv_req_id = requestid;
+ vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
+
+ schedule_work(&vss_handle_request_work);
+ return;
+ } else {
+ pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
+ ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+static void vss_on_reset(void)
+{
+ if (cancel_delayed_work_sync(&vss_timeout_work))
+ vss_respond_to_host(HV_E_FAIL);
+ vss_transaction.state = HVUTIL_DEVICE_INIT;
+}
+
+int
+hv_vss_init(struct hv_util_service *srv)
+{
+ if (vmbus_proto_version < VERSION_WIN8_1) {
+ pr_warn("Integration service 'Backup (volume snapshot)'"
+ " not supported on this host version.\n");
+ return -ENOTSUPP;
+ }
+ recv_buffer = srv->recv_buffer;
+ vss_transaction.recv_channel = srv->channel;
+ vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ vss_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
+ vss_on_msg, vss_on_reset);
+ if (!hvt) {
+ pr_warn("VSS: Failed to initialize transport\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void hv_vss_cancel_work(void)
+{
+ cancel_delayed_work_sync(&vss_timeout_work);
+ cancel_work_sync(&vss_handle_request_work);
+}
+
+int hv_vss_pre_suspend(void)
+{
+ struct vmbus_channel *channel = vss_transaction.recv_channel;
+ struct hv_vss_msg *vss_msg;
+
+ /*
+ * Fake a THAW message for the user space daemon in case the daemon
+ * has frozen the file systems. It doesn't matter if there is already
+ * a message pending to be delivered to the user space since we force
+ * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
+ * write() will fail with EINVAL (see vss_on_msg()), and the daemon
+ * will reset the device by closing and re-opening it.
+ */
+ vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
+ if (!vss_msg)
+ return -ENOMEM;
+
+ tasklet_disable(&channel->callback_event);
+
+ vss_msg->vss_hdr.operation = VSS_OP_THAW;
+
+ /* Cancel any possible pending work. */
+ hv_vss_cancel_work();
+
+ /* We don't care about the return value. */
+ hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
+
+ kfree(vss_msg);
+
+ vss_transaction.state = HVUTIL_READY;
+
+ /* tasklet_enable() will be called in hv_vss_pre_resume(). */
+ return 0;
+}
+
+int hv_vss_pre_resume(void)
+{
+ struct vmbus_channel *channel = vss_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
+void hv_vss_deinit(void)
+{
+ vss_transaction.state = HVUTIL_DEVICE_DYING;
+
+ hv_vss_cancel_work();
+
+ hvutil_transport_destroy(hvt);
+}
diff --git a/drivers/hv/hv_trace.c b/drivers/hv/hv_trace.c
new file mode 100644
index 000000000..38d359cf1
--- /dev/null
+++ b/drivers/hv/hv_trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "hyperv_vmbus.h"
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace.h"
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
new file mode 100644
index 000000000..c02a1719e
--- /dev/null
+++ b/drivers/hv/hv_trace.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(vmbus_hdr_msg,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr),
+ TP_STRUCT__entry(__field(unsigned int, msgtype)),
+ TP_fast_assign(__entry->msgtype = hdr->msgtype;),
+ TP_printk("msgtype=%u", __entry->msgtype)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_msg_dpc,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_message,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
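+/*
+ * Each DEFINE_EVENT() above generates a trace_<name>() helper (e.g.
+ * trace_vmbus_on_msg_dpc(hdr)) that the VMBus code calls at the matching
+ * point. With tracefs mounted, the events normally appear under
+ * /sys/kernel/tracing/events/hyperv/ and can be enabled with, e.g.:
+ *
+ *   echo 1 > /sys/kernel/tracing/events/hyperv/vmbus_on_msg_dpc/enable
+ */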
+
+TRACE_EVENT(vmbus_onoffer,
+ TP_PROTO(const struct vmbus_channel_offer_channel *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u8, monitorid)
+ __field(u16, is_ddc_int)
+ __field(u32, connection_id)
+ __array(char, if_type, 16)
+ __array(char, if_instance, 16)
+ __field(u16, chn_flags)
+ __field(u16, mmio_mb)
+ __field(u16, sub_idx)
+ ),
+ TP_fast_assign(__entry->child_relid = offer->child_relid;
+ __entry->monitorid = offer->monitorid;
+ __entry->is_ddc_int = offer->is_dedicated_interrupt;
+ __entry->connection_id = offer->connection_id;
+ export_guid(__entry->if_type, &offer->offer.if_type);
+ export_guid(__entry->if_instance, &offer->offer.if_instance);
+ __entry->chn_flags = offer->offer.chn_flags;
+ __entry->mmio_mb = offer->offer.mmio_megabytes;
+ __entry->sub_idx = offer->offer.sub_channel_index;
+ ),
+ TP_printk("child_relid 0x%x, monitorid 0x%x, is_dedicated %d, "
+ "connection_id 0x%x, if_type %pUl, if_instance %pUl, "
+ "chn_flags 0x%x, mmio_megabytes %d, sub_channel_index %d",
+ __entry->child_relid, __entry->monitorid,
+ __entry->is_ddc_int, __entry->connection_id,
+ __entry->if_type, __entry->if_instance,
+ __entry->chn_flags, __entry->mmio_mb,
+ __entry->sub_idx
+ )
+ );
+
+TRACE_EVENT(vmbus_onoffer_rescind,
+ TP_PROTO(const struct vmbus_channel_rescind_offer *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(__field(u32, child_relid)),
+ TP_fast_assign(__entry->child_relid = offer->child_relid),
+ TP_printk("child_relid 0x%x", __entry->child_relid)
+ );
+
+TRACE_EVENT(vmbus_onopen_result,
+ TP_PROTO(const struct vmbus_channel_open_result *result),
+ TP_ARGS(result),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = result->child_relid;
+ __entry->openid = result->openid;
+ __entry->status = result->status;
+ ),
+ TP_printk("child_relid 0x%x, openid %d, status %d",
+ __entry->child_relid, __entry->openid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_created,
+ TP_PROTO(const struct vmbus_channel_gpadl_created *gpadlcreated),
+ TP_ARGS(gpadlcreated),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = gpadlcreated->child_relid;
+ __entry->gpadl = gpadlcreated->gpadl;
+ __entry->status = gpadlcreated->creation_status;
+ ),
+ TP_printk("child_relid 0x%x, gpadl 0x%x, creation_status %d",
+ __entry->child_relid, __entry->gpadl, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_onmodifychannel_response,
+ TP_PROTO(const struct vmbus_channel_modifychannel_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = response->child_relid;
+ __entry->status = response->status;
+ ),
+ TP_printk("child_relid 0x%x, status %d",
+ __entry->child_relid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_torndown,
+ TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
+ TP_ARGS(gpadltorndown),
+ TP_STRUCT__entry(__field(u32, gpadl)),
+ TP_fast_assign(__entry->gpadl = gpadltorndown->gpadl),
+ TP_printk("gpadl 0x%x", __entry->gpadl)
+ );
+
+TRACE_EVENT(vmbus_onversion_response,
+ TP_PROTO(const struct vmbus_channel_version_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u8, ver)
+ ),
+ TP_fast_assign(__entry->ver = response->version_supported;
+ ),
+ TP_printk("version_supported %d", __entry->ver)
+ );
+
+TRACE_EVENT(vmbus_request_offers,
+ TP_PROTO(int ret),
+ TP_ARGS(ret),
+ TP_STRUCT__entry(__field(int, ret)),
+ TP_fast_assign(__entry->ret = ret),
+ TP_printk("sending ret %d", __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_open,
+ TP_PROTO(const struct vmbus_channel_open_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, gpadlhandle)
+ __field(u32, target_vp)
+ __field(u32, offset)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->openid = msg->openid;
+ __entry->gpadlhandle = msg->ringbuffer_gpadlhandle;
+ __entry->target_vp = msg->target_vp;
+ __entry->offset = msg->downstream_ringbuffer_pageoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, openid %d, "
+ "gpadlhandle 0x%x, target_vp 0x%x, offset 0x%x, ret %d",
+ __entry->child_relid, __entry->openid,
+ __entry->gpadlhandle, __entry->target_vp,
+ __entry->offset, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_close_internal,
+ TP_PROTO(const struct vmbus_channel_close_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d", __entry->child_relid,
+ __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_header,
+ TP_PROTO(const struct vmbus_channel_gpadl_header *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u16, range_buflen)
+ __field(u16, rangecount)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->range_buflen = msg->range_buflen;
+ __entry->rangecount = msg->rangecount;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, range_buflen %d "
+ "rangecount %d, ret %d",
+ __entry->child_relid, __entry->gpadl,
+ __entry->range_buflen, __entry->rangecount, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_body,
+ TP_PROTO(const struct vmbus_channel_gpadl_body *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, msgnumber)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->msgnumber = msg->msgnumber;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending msgnumber %d, gpadl 0x%x, ret %d",
+ __entry->msgnumber, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_teardown_gpadl,
+ TP_PROTO(const struct vmbus_channel_gpadl_teardown *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, ret %d",
+ __entry->child_relid, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_negotiate_version,
+ TP_PROTO(const struct vmbus_channel_initiate_contact *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, ver)
+ __field(u32, target_vcpu)
+ __field(int, ret)
+ __field(u64, int_page)
+ __field(u64, mon_page1)
+ __field(u64, mon_page2)
+ ),
+ TP_fast_assign(
+ __entry->ver = msg->vmbus_version_requested;
+ __entry->target_vcpu = msg->target_vcpu;
+ __entry->int_page = msg->interrupt_page;
+ __entry->mon_page1 = msg->monitor_page1;
+ __entry->mon_page2 = msg->monitor_page2;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending vmbus_version_requested %d, target_vcpu 0x%x, "
+ "pages %llx:%llx:%llx, ret %d",
+ __entry->ver, __entry->target_vcpu, __entry->int_page,
+ __entry->mon_page1, __entry->mon_page2, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_release_relid,
+ TP_PROTO(const struct vmbus_channel_relid_released *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d",
+ __entry->child_relid, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_tl_connect_request,
+ TP_PROTO(const struct vmbus_channel_tl_connect_request *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __array(char, guest_id, 16)
+ __array(char, host_id, 16)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+ export_guid(__entry->host_id, &msg->host_service_id);
+ __entry->ret = ret;
+ ),
+ TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+ "ret %d",
+ __entry->guest_id, __entry->host_id, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_modifychannel,
+ TP_PROTO(const struct vmbus_channel_modifychannel *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, target_vp)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->target_vp = msg->target_vp;
+ __entry->ret = ret;
+ ),
+ TP_printk("binding child_relid 0x%x to target_vp 0x%x, ret %d",
+ __entry->child_relid, __entry->target_vp, __entry->ret
+ )
+ );
+
+DECLARE_EVENT_CLASS(vmbus_channel,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel),
+ TP_STRUCT__entry(__field(u32, relid)),
+ TP_fast_assign(__entry->relid = channel->offermsg.child_relid),
+ TP_printk("relid 0x%x", __entry->relid)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_chan_sched,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_setevent,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_on_event,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace
+#endif /* _HV_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hv/hv_trace_balloon.h b/drivers/hv/hv_trace_balloon.h
new file mode 100644
index 000000000..93082888a
--- /dev/null
+++ b/drivers/hv/hv_trace_balloon.h
@@ -0,0 +1,48 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_BALLOON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_BALLOON_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(balloon_status,
+ TP_PROTO(u64 available, u64 committed,
+ unsigned long vm_memory_committed,
+ unsigned long pages_ballooned,
+ unsigned long pages_added,
+ unsigned long pages_onlined),
+ TP_ARGS(available, committed, vm_memory_committed,
+ pages_ballooned, pages_added, pages_onlined),
+ TP_STRUCT__entry(
+ __field(u64, available)
+ __field(u64, committed)
+ __field(unsigned long, vm_memory_committed)
+ __field(unsigned long, pages_ballooned)
+ __field(unsigned long, pages_added)
+ __field(unsigned long, pages_onlined)
+ ),
+ TP_fast_assign(
+ __entry->available = available;
+ __entry->committed = committed;
+ __entry->vm_memory_committed = vm_memory_committed;
+ __entry->pages_ballooned = pages_ballooned;
+ __entry->pages_added = pages_added;
+ __entry->pages_onlined = pages_onlined;
+ ),
+ TP_printk("available %lld, committed %lld; vm_memory_committed %ld;"
+ " pages_ballooned %ld, pages_added %ld, pages_onlined %ld",
+ __entry->available, __entry->committed,
+ __entry->vm_memory_committed, __entry->pages_ballooned,
+ __entry->pages_added, __entry->pages_onlined
+ )
+ );
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace_balloon
+#endif /* _HV_TRACE_BALLOON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
new file mode 100644
index 000000000..835e6039c
--- /dev/null
+++ b/drivers/hv/hv_util.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/reboot.h>
+#include <linux/hyperv.h>
+#include <linux/clockchips.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv_vmbus.h"
+
+#define SD_MAJOR 3
+#define SD_MINOR 0
+#define SD_MINOR_1 1
+#define SD_MINOR_2 2
+#define SD_VERSION_3_1 (SD_MAJOR << 16 | SD_MINOR_1)
+#define SD_VERSION_3_2 (SD_MAJOR << 16 | SD_MINOR_2)
+#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
+
+#define SD_MAJOR_1 1
+#define SD_VERSION_1 (SD_MAJOR_1 << 16 | SD_MINOR)
+
+#define TS_MAJOR 4
+#define TS_MINOR 0
+#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
+
+#define TS_MAJOR_1 1
+#define TS_VERSION_1 (TS_MAJOR_1 << 16 | TS_MINOR)
+
+#define TS_MAJOR_3 3
+#define TS_VERSION_3 (TS_MAJOR_3 << 16 | TS_MINOR)
+
+#define HB_MAJOR 3
+#define HB_MINOR 0
+#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
+
+#define HB_MAJOR_1 1
+#define HB_VERSION_1 (HB_MAJOR_1 << 16 | HB_MINOR)
+
+static int sd_srv_version;
+static int ts_srv_version;
+static int hb_srv_version;
+
+#define SD_VER_COUNT 4
+static const int sd_versions[] = {
+ SD_VERSION_3_2,
+ SD_VERSION_3_1,
+ SD_VERSION,
+ SD_VERSION_1
+};
+
+#define TS_VER_COUNT 3
+static const int ts_versions[] = {
+ TS_VERSION,
+ TS_VERSION_3,
+ TS_VERSION_1
+};
+
+#define HB_VER_COUNT 2
+static const int hb_versions[] = {
+ HB_VERSION,
+ HB_VERSION_1
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+ UTIL_FW_VERSION,
+ UTIL_WS2K8_FW_VERSION
+};
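+/*
+ * Each negotiated version packs "major << 16 | minor" into a single int,
+ * e.g. SD_VERSION_3_2 == (3 << 16) | 2 == 0x00030002. The pr_info() calls
+ * below recover the halves with "srv_version >> 16" and
+ * "srv_version & 0xFFFF", printing "3.2" in this example.
+ */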
+
+/*
+ * Send the "hibernate" udev event in a thread context.
+ */
+struct hibernate_work_context {
+ struct work_struct work;
+ struct hv_device *dev;
+};
+
+static struct hibernate_work_context hibernate_context;
+static bool hibernation_supported;
+
+static void send_hibernate_uevent(struct work_struct *work)
+{
+ char *uevent_env[2] = { "EVENT=hibernate", NULL };
+ struct hibernate_work_context *ctx;
+
+ ctx = container_of(work, struct hibernate_work_context, work);
+
+ kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
+
+ pr_info("Sent hibernation uevent\n");
+}
+
+static int hv_shutdown_init(struct hv_util_service *srv)
+{
+ struct vmbus_channel *channel = srv->channel;
+
+ INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
+ hibernate_context.dev = channel->device_obj;
+
+ hibernation_supported = hv_is_hibernation_supported();
+
+ return 0;
+}
+
+static void shutdown_onchannelcallback(void *context);
+static struct hv_util_service util_shutdown = {
+ .util_cb = shutdown_onchannelcallback,
+ .util_init = hv_shutdown_init,
+};
+
+static int hv_timesync_init(struct hv_util_service *srv);
+static int hv_timesync_pre_suspend(void);
+static void hv_timesync_deinit(void);
+
+static void timesync_onchannelcallback(void *context);
+static struct hv_util_service util_timesynch = {
+ .util_cb = timesync_onchannelcallback,
+ .util_init = hv_timesync_init,
+ .util_pre_suspend = hv_timesync_pre_suspend,
+ .util_deinit = hv_timesync_deinit,
+};
+
+static void heartbeat_onchannelcallback(void *context);
+static struct hv_util_service util_heartbeat = {
+ .util_cb = heartbeat_onchannelcallback,
+};
+
+static struct hv_util_service util_kvp = {
+ .util_cb = hv_kvp_onchannelcallback,
+ .util_init = hv_kvp_init,
+ .util_pre_suspend = hv_kvp_pre_suspend,
+ .util_pre_resume = hv_kvp_pre_resume,
+ .util_deinit = hv_kvp_deinit,
+};
+
+static struct hv_util_service util_vss = {
+ .util_cb = hv_vss_onchannelcallback,
+ .util_init = hv_vss_init,
+ .util_pre_suspend = hv_vss_pre_suspend,
+ .util_pre_resume = hv_vss_pre_resume,
+ .util_deinit = hv_vss_deinit,
+};
+
+static struct hv_util_service util_fcopy = {
+ .util_cb = hv_fcopy_onchannelcallback,
+ .util_init = hv_fcopy_init,
+ .util_pre_suspend = hv_fcopy_pre_suspend,
+ .util_pre_resume = hv_fcopy_pre_resume,
+ .util_deinit = hv_fcopy_deinit,
+};
+
+static void perform_shutdown(struct work_struct *dummy)
+{
+ orderly_poweroff(true);
+}
+
+static void perform_restart(struct work_struct *dummy)
+{
+ orderly_reboot();
+}
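+/*
+ * Note: orderly_poweroff(true)/orderly_reboot() first ask userspace to act
+ * (typically by running /sbin/poweroff or /sbin/reboot); the "true"
+ * argument to orderly_poweroff() additionally forces a kernel power-off if
+ * the userspace helper fails.
+ */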
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
+/*
+ * Perform the restart operation in a thread context.
+ */
+static DECLARE_WORK(restart_work, perform_restart);
+
+static void shutdown_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ struct work_struct *work = NULL;
+ u32 recvlen;
+ u64 requestid;
+ u8 *shut_txf_buf = util_shutdown.recv_buffer;
+
+ struct shutdown_msg_data *shutdown_msg;
+
+ struct icmsg_hdr *icmsghdrp;
+
+ if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
+ pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ shut_txf_buf, recvlen,
+ fw_versions, FW_VER_COUNT,
+ sd_versions, SD_VER_COUNT,
+ NULL, &sd_srv_version)) {
+ pr_info("Shutdown IC version %d.%d\n",
+ sd_srv_version >> 16,
+ sd_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
+ /* Ensure recvlen is big enough to contain shutdown_msg_data struct */
+ if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
+ pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
+ recvlen);
+ return;
+ }
+
+ shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];
+
+ /*
+ * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
+ * or 4 (hibernate). The value may be OR'ed with 1, which
+ * means the request should be performed by force. Linux
+ * always tries to perform the request by force.
+ */
+ switch (shutdown_msg->flags) {
+ case 0:
+ case 1:
+ icmsghdrp->status = HV_S_OK;
+ work = &shutdown_work;
+ pr_info("Shutdown request received - graceful shutdown initiated\n");
+ break;
+ case 2:
+ case 3:
+ icmsghdrp->status = HV_S_OK;
+ work = &restart_work;
+ pr_info("Restart request received - graceful restart initiated\n");
+ break;
+ case 4:
+ case 5:
+ pr_info("Hibernation request received\n");
+ icmsghdrp->status = hibernation_supported ?
+ HV_S_OK : HV_E_FAIL;
+ if (hibernation_supported)
+ work = &hibernate_context.work;
+ break;
+ default:
+ icmsghdrp->status = HV_E_FAIL;
+ pr_info("Shutdown request received - Invalid request\n");
+ break;
+ }
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, shut_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
+ if (work)
+ schedule_work(work);
+}
+
+/*
+ * Set the host time in a process context.
+ */
+static struct work_struct adj_time_work;
+
+/*
+ * The last time sample, received from the host. PTP device responds to
+ * requests by using this data and the current partition-wide time reference
+ * count.
+ */
+static struct {
+ u64 host_time;
+ u64 ref_time;
+ spinlock_t lock;
+} host_ts;
+
+static inline u64 reftime_to_ns(u64 reftime)
+{
+ return (reftime - WLTIMEDELTA) * 100;
+}
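+/*
+ * Host timestamps are in 100ns units counted from the Windows epoch
+ * (1601-01-01). WLTIMEDELTA (defined in the Hyper-V headers, nominally
+ * 116444736000000000) is the offset of that epoch from the Unix epoch in
+ * the same 100ns units, so the conversion above is simply:
+ *
+ *   ns_since_1970 = (reftime - WLTIMEDELTA) * 100
+ */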
+
+/*
+ * Hard coded threshold for host timesync delay: 600 seconds
+ */
+static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
+
+static int hv_get_adj_host_time(struct timespec64 *ts)
+{
+ u64 newtime, reftime, timediff_adj;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&host_ts.lock, flags);
+ reftime = hv_read_reference_counter();
+
+ /*
+ * We need to let the caller know that last update from host
+ * is older than the max allowable threshold. clock_gettime()
+ * and PTP ioctl do not have a documented error that we could
+ * return for this specific case. Use ESTALE to report this.
+ */
+ timediff_adj = reftime - host_ts.ref_time;
+ if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
+ pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
+ (timediff_adj * 100));
+ ret = -ESTALE;
+ }
+
+ newtime = host_ts.host_time + timediff_adj;
+ *ts = ns_to_timespec64(reftime_to_ns(newtime));
+ spin_unlock_irqrestore(&host_ts.lock, flags);
+
+ return ret;
+}
+
+static void hv_set_host_time(struct work_struct *work)
+{
+ struct timespec64 ts;
+
+ if (!hv_get_adj_host_time(&ts))
+ do_settimeofday64(&ts);
+}
+
+/*
+ * Synchronize time with host after reboot, restore, etc.
+ *
+ * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
+ * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
+ * message after the timesync channel is opened. Since the hv_utils module is
+ * loaded after hv_vmbus, the first message is usually missed. This bit is
+ * considered a hard request to discipline the clock.
+ *
+ * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
+ * typically used as a hint to the guest. The guest is under no obligation
+ * to discipline the clock.
+ */
+static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
+{
+ unsigned long flags;
+ u64 cur_reftime;
+
+ /*
+ * Save the adjusted time sample from the host and the snapshot
+ * of the current system time.
+ */
+ spin_lock_irqsave(&host_ts.lock, flags);
+
+ cur_reftime = hv_read_reference_counter();
+ host_ts.host_time = hosttime;
+ host_ts.ref_time = cur_reftime;
+
+ /*
+ * TimeSync v4 messages contain reference time (guest's Hyper-V
+ * clocksource read when the time sample was generated), we can
+ * improve the precision by adding the delta between now and the
+ * time of generation. For older protocols we set
+ * reftime == cur_reftime on call.
+ */
+ host_ts.host_time += (cur_reftime - reftime);
+
+ spin_unlock_irqrestore(&host_ts.lock, flags);
+
+ /* Schedule work to do do_settimeofday64() */
+ if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ schedule_work(&adj_time_work);
+}
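+/*
+ * Example of the v4 adjustment above: if the host stamped the sample at
+ * reftime R and we read cur_reftime = R + d when processing the message,
+ * host_time is advanced by d (all in 100ns units), so the stored sample
+ * describes "now" rather than the moment it was generated.
+ */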
+
+/*
+ * Time Sync Channel message handler.
+ */
+static void timesync_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct ictimesync_data *timedatap;
+ struct ictimesync_ref_data *refdata;
+ u8 *time_txf_buf = util_timesynch.recv_buffer;
+
+ /*
+ * Drain the ring buffer and use the last packet to update
+ * host_ts
+ */
+ while (1) {
+ int ret = vmbus_recvpacket(channel, time_txf_buf,
+ HV_HYP_PAGE_SIZE, &recvlen,
+ &requestid);
+ if (ret) {
+ pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
+ break;
+ }
+
+ if (!recvlen)
+ break;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ time_txf_buf, recvlen,
+ fw_versions, FW_VER_COUNT,
+ ts_versions, TS_VER_COUNT,
+ NULL, &ts_srv_version)) {
+ pr_info("TimeSync IC version %d.%d\n",
+ ts_srv_version >> 16,
+ ts_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
+ if (ts_srv_version > TS_VERSION_3) {
+ /* Ensure recvlen is big enough to read ictimesync_ref_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
+ pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];
+
+ adj_guesttime(refdata->parenttime,
+ refdata->vmreferencetime,
+ refdata->flags);
+ } else {
+ /* Ensure recvlen is big enough to read ictimesync_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
+ pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];
+
+ adj_guesttime(timedatap->parenttime,
+ hv_read_reference_counter(),
+ timedatap->flags);
+ }
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, time_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+}
+
+/*
+ * Heartbeat functionality.
+ * Every two seconds, Hyper-V sends us a heartbeat request message.
+ * We respond to this message, and Hyper-V knows we are alive.
+ */
+static void heartbeat_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct heartbeat_msg_data *heartbeat_msg;
+ u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+
+ while (1) {
+
+ if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
+ &recvlen, &requestid)) {
+ pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ break;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ hbeat_txf_buf, recvlen,
+ fw_versions, FW_VER_COUNT,
+ hb_versions, HB_VER_COUNT,
+ NULL, &hb_srv_version)) {
+
+ pr_info("Heartbeat IC version %d.%d\n",
+ hb_srv_version >> 16,
+ hb_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
+ /*
+ * Ensure recvlen is big enough to read seq_num. Reserved area is not
+ * included in the check as the host may not fill it up entirely
+ */
+ if (recvlen < ICMSG_HDR + sizeof(u64)) {
+ pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];
+
+ heartbeat_msg->seq_num += 1;
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, hbeat_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+}
+
+#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+
+static int util_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_util_service *srv =
+ (struct hv_util_service *)dev_id->driver_data;
+ int ret;
+
+ srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
+ if (!srv->recv_buffer)
+ return -ENOMEM;
+ srv->channel = dev->channel;
+ if (srv->util_init) {
+ ret = srv->util_init(srv);
+ if (ret) {
+ ret = -ENODEV;
+ goto error1;
+ }
+ }
+
+ /*
+ * The services managed by the util driver are not performance
+ * critical and do not need batched reading. Furthermore, some services
+ * such as KVP can only handle one message from the host at a time.
+ * Turn off batched reading for all util drivers before we open the
+ * channel.
+ */
+ set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
+
+ hv_set_drvdata(dev, srv);
+
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
+ dev->channel);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (srv->util_deinit)
+ srv->util_deinit();
+error1:
+ kfree(srv->recv_buffer);
+ return ret;
+}
+
+static int util_remove(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+
+ if (srv->util_deinit)
+ srv->util_deinit();
+ vmbus_close(dev->channel);
+ kfree(srv->recv_buffer);
+
+ return 0;
+}
+
+/*
+ * When we're in util_suspend(), all the userspace processes have been frozen
+ * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
+ * after the whole resume procedure, including util_resume(), finishes.
+ */
+static int util_suspend(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+ int ret = 0;
+
+ if (srv->util_pre_suspend) {
+ ret = srv->util_pre_suspend();
+ if (ret)
+ return ret;
+ }
+
+ vmbus_close(dev->channel);
+
+ return 0;
+}
+
+static int util_resume(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+ int ret = 0;
+
+ if (srv->util_pre_resume) {
+ ret = srv->util_pre_resume();
+ if (ret)
+ return ret;
+ }
+
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
+ dev->channel);
+ return ret;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Shutdown guid */
+ { HV_SHUTDOWN_GUID,
+ .driver_data = (unsigned long)&util_shutdown
+ },
+ /* Time synch guid */
+ { HV_TS_GUID,
+ .driver_data = (unsigned long)&util_timesynch
+ },
+ /* Heartbeat guid */
+ { HV_HEART_BEAT_GUID,
+ .driver_data = (unsigned long)&util_heartbeat
+ },
+ /* KVP guid */
+ { HV_KVP_GUID,
+ .driver_data = (unsigned long)&util_kvp
+ },
+ /* VSS GUID */
+ { HV_VSS_GUID,
+ .driver_data = (unsigned long)&util_vss
+ },
+ /* File copy GUID */
+ { HV_FCOPY_GUID,
+ .driver_data = (unsigned long)&util_fcopy
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+/* The one and only one */
+static struct hv_driver util_drv = {
+ .name = "hv_utils",
+ .id_table = id_table,
+ .probe = util_probe,
+ .remove = util_remove,
+ .suspend = util_suspend,
+ .resume = util_resume,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int hv_ptp_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ return hv_get_adj_host_time(ts);
+}
+
+static struct ptp_clock_info ptp_hyperv_info = {
+ .name = "hyperv",
+ .enable = hv_ptp_enable,
+ .adjtime = hv_ptp_adjtime,
+ .adjfreq = hv_ptp_adjfreq,
+ .gettime64 = hv_ptp_gettime,
+ .settime64 = hv_ptp_settime,
+ .owner = THIS_MODULE,
+};
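+/*
+ * Once registered, the PTP clock normally shows up as /dev/ptpN (and under
+ * /sys/class/ptp/). Only gettime64 is backed by host data here; a userspace
+ * time daemon (e.g. chrony with a "refclock PHC /dev/ptpN" directive) can
+ * poll it to track the host's notion of time.
+ */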
+
+static struct ptp_clock *hv_ptp_clock;
+
+static int hv_timesync_init(struct hv_util_service *srv)
+{
+ spin_lock_init(&host_ts.lock);
+
+ INIT_WORK(&adj_time_work, hv_set_host_time);
+
+ /*
+ * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
+ * disabled but the driver is still useful without the PTP device
+ * as it still handles the ICTIMESYNCFLAG_SYNC case.
+ */
+ hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
+ if (IS_ERR_OR_NULL(hv_ptp_clock)) {
+ pr_err("cannot register PTP clock: %d\n",
+ PTR_ERR_OR_ZERO(hv_ptp_clock));
+ hv_ptp_clock = NULL;
+ }
+
+ return 0;
+}
+
+static void hv_timesync_cancel_work(void)
+{
+ cancel_work_sync(&adj_time_work);
+}
+
+static int hv_timesync_pre_suspend(void)
+{
+ hv_timesync_cancel_work();
+ return 0;
+}
+
+static void hv_timesync_deinit(void)
+{
+ if (hv_ptp_clock)
+ ptp_clock_unregister(hv_ptp_clock);
+
+ hv_timesync_cancel_work();
+}
+
+static int __init init_hyperv_utils(void)
+{
+ pr_info("Registering HyperV Utility Driver\n");
+
+ return vmbus_driver_register(&util_drv);
+}
+
+static void exit_hyperv_utils(void)
+{
+ pr_info("De-Registered HyperV Utility Driver\n");
+
+ vmbus_driver_unregister(&util_drv);
+}
+
+module_init(init_hyperv_utils);
+module_exit(exit_hyperv_utils);
+
+MODULE_DESCRIPTION("Hyper-V Utilities");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
new file mode 100644
index 000000000..832885198
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel/userspace transport abstraction for Hyper-V util driver.
+ *
+ * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
+static DEFINE_SPINLOCK(hvt_list_lock);
+static LIST_HEAD(hvt_list);
+
+static void hvt_reset(struct hvutil_transport *hvt)
+{
+ kfree(hvt->outmsg);
+ hvt->outmsg = NULL;
+ hvt->outmsg_len = 0;
+ if (hvt->on_reset)
+ hvt->on_reset();
+}
+
+static ssize_t hvt_op_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hvutil_transport *hvt;
+ int ret;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 ||
+ hvt->mode != HVUTIL_TRANSPORT_CHARDEV))
+ return -EINTR;
+
+ mutex_lock(&hvt->lock);
+
+ if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
+ ret = -EBADF;
+ goto out_unlock;
+ }
+
+ if (!hvt->outmsg) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < hvt->outmsg_len) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
+ ret = hvt->outmsg_len;
+ else
+ ret = -EFAULT;
+
+ kfree(hvt->outmsg);
+ hvt->outmsg = NULL;
+ hvt->outmsg_len = 0;
+
+ if (hvt->on_read)
+ hvt->on_read();
+ hvt->on_read = NULL;
+
+out_unlock:
+ mutex_unlock(&hvt->lock);
+ return ret;
+}
+
+static ssize_t hvt_op_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hvutil_transport *hvt;
+ u8 *inmsg;
+ int ret;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ inmsg = memdup_user(buf, count);
+ if (IS_ERR(inmsg))
+ return PTR_ERR(inmsg);
+
+ if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
+ ret = -EBADF;
+ else
+ ret = hvt->on_msg(inmsg, count);
+
+ kfree(inmsg);
+
+ return ret ? ret : count;
+}
+
+static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
+{
+ struct hvutil_transport *hvt;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ poll_wait(file, &hvt->outmsg_q, wait);
+
+ if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
+ return EPOLLERR | EPOLLHUP;
+
+ if (hvt->outmsg_len > 0)
+ return EPOLLIN | EPOLLRDNORM;
+
+ return 0;
+}
+
+static int hvt_op_open(struct inode *inode, struct file *file)
+{
+ struct hvutil_transport *hvt;
+ int ret = 0;
+ bool issue_reset = false;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ mutex_lock(&hvt->lock);
+
+ if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
+ ret = -EBADF;
+ } else if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
+ /*
+ * Switching to CHARDEV mode. We switch back to INIT when
+ * the device gets released.
+ */
+ hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
+ } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ /*
+ * We're switching from netlink communication to using char
+ * device. Issue the reset first.
+ */
+ issue_reset = true;
+ hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
+ } else {
+ ret = -EBUSY;
+ }
+
+ if (issue_reset)
+ hvt_reset(hvt);
+
+ mutex_unlock(&hvt->lock);
+
+ return ret;
+}
+
+static void hvt_transport_free(struct hvutil_transport *hvt)
+{
+ misc_deregister(&hvt->mdev);
+ kfree(hvt->outmsg);
+ kfree(hvt);
+}
+
+static int hvt_op_release(struct inode *inode, struct file *file)
+{
+ struct hvutil_transport *hvt;
+ int mode_old;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ mutex_lock(&hvt->lock);
+ mode_old = hvt->mode;
+ if (hvt->mode != HVUTIL_TRANSPORT_DESTROY)
+ hvt->mode = HVUTIL_TRANSPORT_INIT;
+ /*
+ * Cleanup message buffers to avoid spurious messages when the daemon
+ * connects back.
+ */
+ hvt_reset(hvt);
+
+ if (mode_old == HVUTIL_TRANSPORT_DESTROY)
+ complete(&hvt->release);
+
+ mutex_unlock(&hvt->lock);
+
+ return 0;
+}
+
+static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+{
+ struct hvutil_transport *hvt, *hvt_found = NULL;
+
+ spin_lock(&hvt_list_lock);
+ list_for_each_entry(hvt, &hvt_list, list) {
+ if (hvt->cn_id.idx == msg->id.idx &&
+ hvt->cn_id.val == msg->id.val) {
+ hvt_found = hvt;
+ break;
+ }
+ }
+ spin_unlock(&hvt_list_lock);
+ if (!hvt_found) {
+ pr_warn("hvt_cn_callback: spurious message received!\n");
+ return;
+ }
+
+ /*
+ * Switching to NETLINK mode. Switching to CHARDEV happens when someone
+ * opens the device.
+ */
+ mutex_lock(&hvt->lock);
+ if (hvt->mode == HVUTIL_TRANSPORT_INIT)
+ hvt->mode = HVUTIL_TRANSPORT_NETLINK;
+
+ if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
+ hvt_found->on_msg(msg->data, msg->len);
+ else
+ pr_warn("hvt_cn_callback: unexpected netlink message!\n");
+ mutex_unlock(&hvt->lock);
+}
+
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void))
+{
+ struct cn_msg *cn_msg;
+ int ret = 0;
+
+ if (hvt->mode == HVUTIL_TRANSPORT_INIT ||
+ hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
+ return -EINVAL;
+ } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
+ if (!cn_msg)
+ return -ENOMEM;
+ cn_msg->id.idx = hvt->cn_id.idx;
+ cn_msg->id.val = hvt->cn_id.val;
+ cn_msg->len = len;
+ memcpy(cn_msg->data, msg, len);
+ ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
+ kfree(cn_msg);
+ /*
+ * We don't know when netlink messages are delivered but unlike
+ * in CHARDEV mode we're not blocked and we can send the next
+ * message right away.
+ */
+ if (on_read_cb)
+ on_read_cb();
+ return ret;
+ }
+ /* HVUTIL_TRANSPORT_CHARDEV */
+ mutex_lock(&hvt->lock);
+ if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (hvt->outmsg) {
+ /* Previous message wasn't received */
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ hvt->outmsg = kzalloc(len, GFP_KERNEL);
+ if (hvt->outmsg) {
+ memcpy(hvt->outmsg, msg, len);
+ hvt->outmsg_len = len;
+ hvt->on_read = on_read_cb;
+ wake_up_interruptible(&hvt->outmsg_q);
+ } else {
+ ret = -ENOMEM;
+ }
+out_unlock:
+ mutex_unlock(&hvt->lock);
+ return ret;
+}
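+/*
+ * Minimal usage sketch (mirroring vss_send_op() in hv_snapshot.c). The
+ * message is copied into either a netlink skb or hvt->outmsg, so the caller
+ * may free its buffer as soon as this returns:
+ *
+ *   ret = hvutil_transport_send(hvt, msg, sizeof(*msg), NULL);
+ *   if (ret)
+ *           pr_warn("failed to communicate to the daemon: %d\n", ret);
+ *   kfree(msg);
+ */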
+
+struct hvutil_transport *hvutil_transport_init(const char *name,
+ u32 cn_idx, u32 cn_val,
+ int (*on_msg)(void *, int),
+ void (*on_reset)(void))
+{
+ struct hvutil_transport *hvt;
+
+ hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
+ if (!hvt)
+ return NULL;
+
+ hvt->cn_id.idx = cn_idx;
+ hvt->cn_id.val = cn_val;
+
+ hvt->mdev.minor = MISC_DYNAMIC_MINOR;
+ hvt->mdev.name = name;
+
+ hvt->fops.owner = THIS_MODULE;
+ hvt->fops.read = hvt_op_read;
+ hvt->fops.write = hvt_op_write;
+ hvt->fops.poll = hvt_op_poll;
+ hvt->fops.open = hvt_op_open;
+ hvt->fops.release = hvt_op_release;
+
+ hvt->mdev.fops = &hvt->fops;
+
+ init_waitqueue_head(&hvt->outmsg_q);
+ mutex_init(&hvt->lock);
+ init_completion(&hvt->release);
+
+ spin_lock(&hvt_list_lock);
+ list_add(&hvt->list, &hvt_list);
+ spin_unlock(&hvt_list_lock);
+
+ hvt->on_msg = on_msg;
+ hvt->on_reset = on_reset;
+
+ if (misc_register(&hvt->mdev))
+ goto err_free_hvt;
+
+ /* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
+ if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
+ cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
+ goto err_free_hvt;
+
+ return hvt;
+
+err_free_hvt:
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
+ kfree(hvt);
+ return NULL;
+}
+
+void hvutil_transport_destroy(struct hvutil_transport *hvt)
+{
+ int mode_old;
+
+ mutex_lock(&hvt->lock);
+ mode_old = hvt->mode;
+ hvt->mode = HVUTIL_TRANSPORT_DESTROY;
+ wake_up_interruptible(&hvt->outmsg_q);
+ mutex_unlock(&hvt->lock);
+
+ /*
+ * In case we were in 'chardev' mode we still have an open fd so we
+ * have to defer freeing the device. Netlink interface can be freed
+ * now.
+ */
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
+ if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
+ cn_del_callback(&hvt->cn_id);
+
+ if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+ wait_for_completion(&hvt->release);
+
+ hvt_transport_free(hvt);
+}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
new file mode 100644
index 000000000..1c162393c
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Kernel/userspace transport abstraction for Hyper-V util driver.
+ *
+ * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
+ */
+
+#ifndef _HV_UTILS_TRANSPORT_H
+#define _HV_UTILS_TRANSPORT_H
+
+#include <linux/connector.h>
+#include <linux/miscdevice.h>
+
+enum hvutil_transport_mode {
+ HVUTIL_TRANSPORT_INIT = 0,
+ HVUTIL_TRANSPORT_NETLINK,
+ HVUTIL_TRANSPORT_CHARDEV,
+ HVUTIL_TRANSPORT_DESTROY,
+};
+
+struct hvutil_transport {
+ int mode; /* hvutil_transport_mode */
+ struct file_operations fops; /* file operations */
+ struct miscdevice mdev; /* misc device */
+ struct cb_id cn_id; /* CN_*_IDX/CN_*_VAL */
+ struct list_head list; /* hvt_list */
+ int (*on_msg)(void *, int); /* callback on new user message */
+ void (*on_reset)(void); /* callback when userspace drops */
+ void (*on_read)(void); /* callback on message read */
+ u8 *outmsg; /* message to the userspace */
+ int outmsg_len; /* its length */
+ wait_queue_head_t outmsg_q; /* poll/read wait queue */
+ struct mutex lock; /* protects struct members */
+ struct completion release; /* synchronize with fd release */
+};
+
+struct hvutil_transport *hvutil_transport_init(const char *name,
+ u32 cn_idx, u32 cn_val,
+ int (*on_msg)(void *, int),
+ void (*on_reset)(void));
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void));
+void hvutil_transport_destroy(struct hvutil_transport *hvt);
+
+#endif /* _HV_UTILS_TRANSPORT_H */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
new file mode 100644
index 000000000..dc673edf0
--- /dev/null
+++ b/drivers/hv/hyperv_vmbus.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (c) 2011, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+
+#ifndef _HYPERV_VMBUS_H
+#define _HYPERV_VMBUS_H
+
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <asm/sync_bitops.h>
+#include <asm/hyperv-tlfs.h>
+#include <linux/atomic.h>
+#include <linux/hyperv.h>
+#include <linux/interrupt.h>
+
+#include "hv_trace.h"
+
+/*
+ * Timeout for services such as KVP and fcopy.
+ */
+#define HV_UTIL_TIMEOUT 30
+
+/*
+ * Timeout for guest-host handshake for services.
+ */
+#define HV_UTIL_NEGO_TIMEOUT 55
+
+
+/* Definitions for the monitored notification facility */
+union hv_monitor_trigger_group {
+ u64 as_uint64;
+ struct {
+ u32 pending;
+ u32 armed;
+ };
+};
+
+struct hv_monitor_parameter {
+ union hv_connection_id connectionid;
+ u16 flagnumber;
+ u16 rsvdz;
+};
+
+union hv_monitor_trigger_state {
+ u32 asu32;
+
+ struct {
+ u32 group_enable:4;
+ u32 rsvdz:28;
+ };
+};
+
+/* struct hv_monitor_page Layout */
+/* ------------------------------------------------------ */
+/* | 0 | TriggerState (4 bytes) | Rsvd1 (4 bytes) | */
+/* | 8 | TriggerGroup[0] | */
+/* | 10 | TriggerGroup[1] | */
+/* | 18 | TriggerGroup[2] | */
+/* | 20 | TriggerGroup[3] | */
+/* | 28 | Rsvd2[0] | */
+/* | 30 | Rsvd2[1] | */
+/* | 38 | Rsvd2[2] | */
+/* | 40 | NextCheckTime[0][0] | NextCheckTime[0][1] | */
+/* | ... | */
+/* | 240 | Latency[0][0..3] | */
+/* | 340 | Rsvz3[0] | */
+/* | 440 | Parameter[0][0] | */
+/* | 448 | Parameter[0][1] | */
+/* | ... | */
+/* | 840 | Rsvd4[0] | */
+/* ------------------------------------------------------ */
+struct hv_monitor_page {
+ union hv_monitor_trigger_state trigger_state;
+ u32 rsvdz1;
+
+ union hv_monitor_trigger_group trigger_group[4];
+ u64 rsvdz2[3];
+
+ s32 next_checktime[4][32];
+
+ u16 latency[4][32];
+ u64 rsvdz3[32];
+
+ struct hv_monitor_parameter parameter[4][32];
+
+ u8 rsvdz4[1984];
+};
+
+#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)
+
+/* Definition of the hv_post_message hypercall input structure. */
+struct hv_input_post_message {
+ union hv_connection_id connectionid;
+ u32 reserved;
+ u32 message_type;
+ u32 payload_size;
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+};
+
+
+enum {
+ VMBUS_MESSAGE_CONNECTION_ID = 1,
+ VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
+ VMBUS_MESSAGE_PORT_ID = 1,
+ VMBUS_EVENT_CONNECTION_ID = 2,
+ VMBUS_EVENT_PORT_ID = 2,
+ VMBUS_MONITOR_CONNECTION_ID = 3,
+ VMBUS_MONITOR_PORT_ID = 3,
+ VMBUS_MESSAGE_SINT = 2,
+};
+
+/*
+ * Per cpu state for channel handling
+ */
+struct hv_per_cpu_context {
+ void *synic_message_page;
+ void *synic_event_page;
+ /*
+ * buffer to post messages to the host.
+ */
+ void *post_msg_page;
+
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles event messages on a per-CPU
+ * basis.
+ */
+ struct tasklet_struct msg_dpc;
+};
+
+struct hv_context {
+ /* We only support running on top of Hyper-V,
+ * so at this point this really can only contain the Hyper-V ID.
+ */
+ u64 guestid;
+
+ struct hv_per_cpu_context __percpu *cpu_context;
+
+ /*
+ * To manage allocations in a NUMA node.
+ * Array indexed by numa node ID.
+ */
+ struct cpumask *hv_numa_map;
+};
+
+extern struct hv_context hv_context;
+
+/* Hv Interface */
+
+extern int hv_init(void);
+
+extern int hv_post_message(union hv_connection_id connection_id,
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size);
+
+extern int hv_synic_alloc(void);
+
+extern void hv_synic_free(void);
+
+extern void hv_synic_enable_regs(unsigned int cpu);
+extern int hv_synic_init(unsigned int cpu);
+
+extern void hv_synic_disable_regs(unsigned int cpu);
+extern int hv_synic_cleanup(unsigned int cpu);
+
+/* Interface */
+
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
+
+int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ struct page *pages, u32 pagecnt, u32 max_pkt_size);
+
+void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
+
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid, u64 *trans_id);
+
+int hv_ringbuffer_read(struct vmbus_channel *channel,
+ void *buffer, u32 buflen, u32 *buffer_actual_len,
+ u64 *requestid, bool raw);
+
+/*
+ * The maximum number of channels (16384) is determined by the size of the
+ * interrupt page, which is HV_HYP_PAGE_SIZE: one half of the page is used to
+ * send endpoint interrupts, and the other half to receive them.
+ */
+#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
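+/*
+ * With a 4K hypervisor page this works out to (4096 / 2) * 8 = 16384
+ * relids: half the page per direction, one bit per channel.
+ */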
+
+/* The value here must be in multiple of 32 */
+#define MAX_NUM_CHANNELS_SUPPORTED 256
+
+#define MAX_CHANNEL_RELIDS \
+ max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
+
+enum vmbus_connect_state {
+ DISCONNECTED,
+ CONNECTING,
+ CONNECTED,
+ DISCONNECTING
+};
+
+#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
+
+/*
+ * The CPU that Hyper-V will interrupt for VMBUS messages, such as
+ * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
+ */
+#define VMBUS_CONNECT_CPU 0
+
+struct vmbus_connection {
+ u32 msg_conn_id;
+
+ atomic_t offer_in_progress;
+
+ enum vmbus_connect_state conn_state;
+
+ atomic_t next_gpadl_handle;
+
+ struct completion unload_event;
+ /*
+ * Represents channel interrupts. Each bit position represents a
+ * channel. When a channel sends an interrupt via VMBUS, it finds its
+ * bit in the sendInterruptPage, sets it and calls Hv to generate a port
+ * event. The other end receives the port event and parses the
+ * recvInterruptPage to see which bit is set.
+ */
+ void *int_page;
+ void *send_int_page;
+ void *recv_int_page;
+
+ /*
+ * 2 pages - 1st page for parent->child notification and 2nd
+ * is child->parent notification
+ */
+ struct hv_monitor_page *monitor_pages[2];
+ void *monitor_pages_original[2];
+ phys_addr_t monitor_pages_pa[2];
+ struct list_head chn_msg_list;
+ spinlock_t channelmsg_lock;
+
+ /* List of channels */
+ struct list_head chn_list;
+ struct mutex channel_mutex;
+
+ /* Array of channels */
+ struct vmbus_channel **channels;
+
+ /*
+ * An offer message is handled first on the work_queue, and then
+ * is further handled on handle_primary_chan_wq or
+ * handle_sub_chan_wq.
+ */
+ struct workqueue_struct *work_queue;
+ struct workqueue_struct *handle_primary_chan_wq;
+ struct workqueue_struct *handle_sub_chan_wq;
+ struct workqueue_struct *rescind_work_queue;
+
+ /*
+ * On suspension of the vmbus, the accumulated offer messages
+ * must be dropped.
+ */
+ bool ignore_any_offer_msg;
+
+ /*
+ * The number of sub-channels and hv_sock channels that should be
+ * cleaned up upon suspend: sub-channels will be re-created upon
+ * resume, and hv_sock channels should not survive suspend.
+ */
+ atomic_t nr_chan_close_on_suspend;
+ /*
+ * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
+ * drop to zero.
+ */
+ struct completion ready_for_suspend_event;
+
+ /*
+ * The number of primary channels that should be "fixed up"
+ * upon resume: these channels are re-offered upon resume, and some
+ * fields of the channel offers (i.e. child_relid and connection_id)
+ * can change, so the old offermsg must be fixed up, before the resume
+ * callbacks of the VSC drivers start to further touch the channels.
+ */
+ atomic_t nr_chan_fixup_on_resume;
+ /*
+ * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
+ * drop to zero.
+ */
+ struct completion ready_for_resume_event;
+};
+
+
+struct vmbus_msginfo {
+ /* Bookkeeping stuff */
+ struct list_head msglist_entry;
+
+ /* The message itself */
+ unsigned char msg[];
+};
+
+
+extern struct vmbus_connection vmbus_connection;
+
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
+
+static inline void vmbus_send_interrupt(u32 relid)
+{
+ sync_set_bit(relid, vmbus_connection.send_int_page);
+}
+
+enum vmbus_message_handler_type {
+ /* The related handler can sleep. */
+ VMHT_BLOCKING = 0,
+
+ /* The related handler must NOT sleep. */
+ VMHT_NON_BLOCKING = 1,
+};
+
+struct vmbus_channel_message_table_entry {
+ enum vmbus_channel_message_type message_type;
+ enum vmbus_message_handler_type handler_type;
+ void (*message_handler)(struct vmbus_channel_message_header *msg);
+ u32 min_payload_len;
+};
+
+extern const struct vmbus_channel_message_table_entry
+ channel_message_table[CHANNELMSG_COUNT];
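+/*
+ * Rough dispatch model: vmbus_on_msg_dpc() looks up the incoming message in
+ * this table; VMHT_BLOCKING handlers (which may sleep) are deferred to a
+ * workqueue, while VMHT_NON_BLOCKING handlers run directly in the
+ * message-handling tasklet.
+ */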
+
+
+/* General vmbus interface */
+
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
+ struct vmbus_channel *channel);
+
+int vmbus_device_register(struct hv_device *child_device_obj);
+void vmbus_device_unregister(struct hv_device *device_obj);
+int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
+
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+
+void vmbus_channel_map_relid(struct vmbus_channel *channel);
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel);
+
+struct vmbus_channel *relid2channel(u32 relid);
+
+void vmbus_free_channels(void);
+
+/* Connection interface */
+
+int vmbus_connect(void);
+void vmbus_disconnect(void);
+
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
+
+void vmbus_on_event(unsigned long data);
+void vmbus_on_msg_dpc(unsigned long data);
+
+int hv_kvp_init(struct hv_util_service *srv);
+void hv_kvp_deinit(void);
+int hv_kvp_pre_suspend(void);
+int hv_kvp_pre_resume(void);
+void hv_kvp_onchannelcallback(void *context);
+
+int hv_vss_init(struct hv_util_service *srv);
+void hv_vss_deinit(void);
+int hv_vss_pre_suspend(void);
+int hv_vss_pre_resume(void);
+void hv_vss_onchannelcallback(void *context);
+
+int hv_fcopy_init(struct hv_util_service *srv);
+void hv_fcopy_deinit(void);
+int hv_fcopy_pre_suspend(void);
+int hv_fcopy_pre_resume(void);
+void hv_fcopy_onchannelcallback(void *context);
+void vmbus_initiate_unload(bool crash);
+
+static inline void hv_poll_channel(struct vmbus_channel *channel,
+ void (*cb)(void *))
+{
+ if (!channel)
+ return;
+ cb(channel);
+}
+
+enum hvutil_device_state {
+ HVUTIL_DEVICE_INIT = 0, /* driver is loaded, waiting for userspace */
+ HVUTIL_READY, /* userspace is registered */
+ HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
+ HVUTIL_USERSPACE_REQ, /* request to userspace was sent */
+ HVUTIL_USERSPACE_RECV, /* reply from userspace was received */
+ HVUTIL_DEVICE_DYING, /* driver unload is in progress */
+};
+
+enum delay {
+ INTERRUPT_DELAY = 0,
+ MESSAGE_DELAY = 1,
+};
+
+extern const struct vmbus_device vmbus_devs[];
+
+static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
+{
+ return vmbus_devs[channel->device_id].perf_device;
+}
+
+static inline bool hv_is_allocated_cpu(unsigned int cpu)
+{
+ struct vmbus_channel *channel, *sc;
+
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+ /*
+ * List additions/deletions as well as updates of the target CPUs are
+ * protected by channel_mutex.
+ */
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!hv_is_perf_channel(channel))
+ continue;
+ if (channel->target_cpu == cpu)
+ return true;
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ if (sc->target_cpu == cpu)
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline void hv_set_allocated_cpu(unsigned int cpu)
+{
+ cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_clear_allocated_cpu(unsigned int cpu)
+{
+ if (hv_is_allocated_cpu(cpu))
+ return;
+ cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_update_allocated_cpus(unsigned int old_cpu,
+ unsigned int new_cpu)
+{
+ hv_set_allocated_cpu(new_cpu);
+ hv_clear_allocated_cpu(old_cpu);
+}
+
+#ifdef CONFIG_HYPERV_TESTING
+
+int hv_debug_add_dev_dir(struct hv_device *dev);
+void hv_debug_rm_dev_dir(struct hv_device *dev);
+void hv_debug_rm_all_dir(void);
+int hv_debug_init(void);
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);
+
+#else /* CONFIG_HYPERV_TESTING */
+
+static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
+static inline void hv_debug_rm_all_dir(void) {};
+static inline void hv_debug_delay_test(struct vmbus_channel *channel,
+ enum delay delay_type) {};
+static inline int hv_debug_init(void)
+{
+ return -1;
+}
+
+static inline int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ return -1;
+}
+
+#endif /* CONFIG_HYPERV_TESTING */
+
+#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
new file mode 100644
index 000000000..c6692fd5a
--- /dev/null
+++ b/drivers/hv/ring_buffer.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/hyperv.h>
+#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/io.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv_vmbus.h"
+
+#define VMBUS_PKT_TRAILER 8
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if they receive interrupts when they are not expecting
+ * them. The host expects interrupts only when the ring transitions from
+ * empty to non-empty (or from full to non-full on the guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
+ */
+
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ virt_mb();
+ if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
+ return;
+
+ /* check interrupt_mask before read_index */
+ virt_rmb();
+ /*
+ * This is the only case we need to signal when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
+ ++channel->intr_out_empty;
+ vmbus_setevent(channel);
+ }
+}
+
+/* Get the next write location for the specified ring buffer. */
+static inline u32
+hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
+{
+ u32 next = ring_info->ring_buffer->write_index;
+
+ return next;
+}
+
+/* Set the next write location for the specified ring buffer. */
+static inline void
+hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ u32 next_write_location)
+{
+ ring_info->ring_buffer->write_index = next_write_location;
+}
+
+/* Get the size of the ring buffer. */
+static inline u32
+hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
+{
+ return ring_info->ring_datasize;
+}
+
+/* Get the read and write indices as u64 of the specified ring buffer. */
+static inline u64
+hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+{
+ return (u64)ring_info->ring_buffer->write_index << 32;
+}
+
+/*
+ * Helper routine to copy from source to ring buffer.
+ * Assumes there is enough room. Handles wrap-around in the destination case only.
+ */
+static u32 hv_copyto_ringbuffer(
+ struct hv_ring_buffer_info *ring_info,
+ u32 start_write_offset,
+ const void *src,
+ u32 srclen)
+{
+ void *ring_buffer = hv_get_ring_buffer(ring_info);
+ u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
+
+ memcpy(ring_buffer + start_write_offset, src, srclen);
+
+ start_write_offset += srclen;
+ if (start_write_offset >= ring_buffer_size)
+ start_write_offset -= ring_buffer_size;
+
+ return start_write_offset;
+}
+
+/*
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from and to write to
+ * the specified ring buffer.
+ */
+static void
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
+{
+ u32 read_loc, write_loc, dsize;
+
+ /* Capture the read/write indices before they change */
+ read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+ write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+ dsize = rbi->ring_datasize;
+
+ *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ *read = dsize - *write;
+}
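+
+/*
+ * Worked example (editorial sketch, not part of the driver): with
+ * dsize = 4096, read_index = 1000 and write_index = 3000, the writer may
+ * use 4096 - (3000 - 1000) = 2096 bytes and the reader has 2000 bytes
+ * pending. With the indices swapped (read_index = 3000, write_index = 1000),
+ * the writable space is 3000 - 1000 = 2000 bytes and the readable space
+ * is 2096 bytes.
+ */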
+
+/* Get various debug metrics for the specified ring buffer. */
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info)
+{
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+
+ mutex_lock(&ring_info->ring_buffer_mutex);
+
+ if (!ring_info->ring_buffer) {
+ mutex_unlock(&ring_info->ring_buffer_mutex);
+ return -EINVAL;
+ }
+
+ hv_get_ringbuffer_availbytes(ring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
+ debug_info->bytes_avail_toread = bytes_avail_toread;
+ debug_info->bytes_avail_towrite = bytes_avail_towrite;
+ debug_info->current_read_index = ring_info->ring_buffer->read_index;
+ debug_info->current_write_index = ring_info->ring_buffer->write_index;
+ debug_info->current_interrupt_mask
+ = ring_info->ring_buffer->interrupt_mask;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
+
+/* Initialize a channel's ring buffer info mutex locks */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
+{
+ mutex_init(&channel->inbound.ring_buffer_mutex);
+ mutex_init(&channel->outbound.ring_buffer_mutex);
+}
+
+/* Initialize the ring buffer. */
+int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ struct page *pages, u32 page_cnt, u32 max_pkt_size)
+{
+ struct page **pages_wraparound;
+ unsigned long *pfns_wraparound;
+ u64 pfn;
+ int i;
+
+ BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
+
+ /*
+ * First page holds struct hv_ring_buffer, do wraparound mapping for
+ * the rest.
+ */
+ if (hv_isolation_type_snp()) {
+ pfn = page_to_pfn(pages) +
+ PFN_DOWN(ms_hyperv.shared_gpa_boundary);
+
+ pfns_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!pfns_wraparound)
+ return -ENOMEM;
+
+ pfns_wraparound[0] = pfn;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
+
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
+ PAGE_KERNEL);
+ kfree(pfns_wraparound);
+
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+
+ /* Zero ring buffer after setting memory host visibility. */
+ memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
+ } else {
+ pages_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_wraparound)
+ return -ENOMEM;
+
+ pages_wraparound[0] = pages;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pages_wraparound[i + 1] =
+ &pages[i % (page_cnt - 1) + 1];
+
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
+ PAGE_KERNEL);
+
+ kfree(pages_wraparound);
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+ }
+
+
+ ring_info->ring_buffer->read_index =
+ ring_info->ring_buffer->write_index = 0;
+
+ /* Set the feature bit for enabling flow control. */
+ ring_info->ring_buffer->feature_bits.value = 1;
+
+ ring_info->ring_size = page_cnt << PAGE_SHIFT;
+ ring_info->ring_size_div10_reciprocal =
+ reciprocal_value(ring_info->ring_size / 10);
+ ring_info->ring_datasize = ring_info->ring_size -
+ sizeof(struct hv_ring_buffer);
+ ring_info->priv_read_index = 0;
+
+ /* Initialize buffer that holds copies of incoming packets */
+ if (max_pkt_size) {
+ ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
+ if (!ring_info->pkt_buffer)
+ return -ENOMEM;
+ ring_info->pkt_buffer_size = max_pkt_size;
+ }
+
+ spin_lock_init(&ring_info->ring_lock);
+
+ return 0;
+}
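+
+/*
+ * Editorial sketch of the wraparound ("double") mapping built above,
+ * assuming page_cnt = 4: the virtual mapping order is
+ *
+ * [ header, data1, data2, data3, data1, data2, data3 ]
+ *
+ * i.e. the data pages appear twice, back to back. A packet that starts
+ * near the end of the data area is therefore still virtually contiguous,
+ * which is why hv_copyto_ringbuffer() can use a single memcpy() and only
+ * has to wrap the returned offset.
+ */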
+
+/* Cleanup the ring buffer. */
+void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+{
+ mutex_lock(&ring_info->ring_buffer_mutex);
+ vunmap(ring_info->ring_buffer);
+ ring_info->ring_buffer = NULL;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
+
+ kfree(ring_info->pkt_buffer);
+ ring_info->pkt_buffer = NULL;
+ ring_info->pkt_buffer_size = 0;
+}
+
+/*
+ * Check if the ring buffer spinlock is available to take or not; used in
+ * atomic contexts, such as the panic path (see the Hyper-V framebuffer driver).
+ */
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rinfo = &channel->outbound;
+
+ return spin_is_locked(&rinfo->ring_lock);
+}
+EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
+
+/* Write to the ring buffer. */
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid, u64 *trans_id)
+{
+ int i;
+ u32 bytes_avail_towrite;
+ u32 totalbytes_towrite = sizeof(u64);
+ u32 next_write_location;
+ u32 old_write;
+ u64 prev_indices;
+ unsigned long flags;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
+ struct vmpacket_descriptor *desc = kv_list[0].iov_base;
+ u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;
+
+ if (channel->rescind)
+ return -ENODEV;
+
+ for (i = 0; i < kv_count; i++)
+ totalbytes_towrite += kv_list[i].iov_len;
+
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
+
+ bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
+
+ /*
+ * If there is only room for the packet, assume it is full.
+ * Otherwise, the next time around, we think the ring buffer
+ * is empty since the read index == write index.
+ */
+ if (bytes_avail_towrite <= totalbytes_towrite) {
+ ++channel->out_full_total;
+
+ if (!channel->out_full_flag) {
+ ++channel->out_full_first;
+ channel->out_full_flag = true;
+ }
+
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ return -EAGAIN;
+ }
+
+ channel->out_full_flag = false;
+
+ /* Write to the ring buffer */
+ next_write_location = hv_get_next_write_location(outring_info);
+
+ old_write = next_write_location;
+
+ for (i = 0; i < kv_count; i++) {
+ next_write_location = hv_copyto_ringbuffer(outring_info,
+ next_write_location,
+ kv_list[i].iov_base,
+ kv_list[i].iov_len);
+ }
+
+ /*
+ * Allocate the request ID after the data has been copied into the
+ * ring buffer. Once this request ID is allocated, the completion
+ * path could find the data and free it.
+ */
+
+ if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
+ if (channel->next_request_id_callback != NULL) {
+ rqst_id = channel->next_request_id_callback(channel, requestid);
+ if (rqst_id == VMBUS_RQST_ERROR) {
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ return -EAGAIN;
+ }
+ }
+ }
+ desc = hv_get_ring_buffer(outring_info) + old_write;
+ __trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+ /*
+ * Ensure the compiler doesn't generate code that reads the value of
+ * the transaction ID from the ring buffer, which is shared with the
+ * Hyper-V host and subject to being changed at any time.
+ */
+ WRITE_ONCE(desc->trans_id, __trans_id);
+ if (trans_id)
+ *trans_id = __trans_id;
+
+ /* Set previous packet start */
+ prev_indices = hv_get_ring_bufferindices(outring_info);
+
+ next_write_location = hv_copyto_ringbuffer(outring_info,
+ next_write_location,
+ &prev_indices,
+ sizeof(u64));
+
+ /* Issue a full memory barrier before updating the write index */
+ virt_mb();
+
+ /* Now, update the write location */
+ hv_set_next_write_location(outring_info, next_write_location);
+
+
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ hv_signal_on_write(old_write, channel);
+
+ if (channel->rescind) {
+ if (rqst_id != VMBUS_NO_RQSTOR) {
+ /* Reclaim request ID to avoid leak of IDs */
+ if (channel->request_addr_callback != NULL)
+ channel->request_addr_callback(channel, rqst_id);
+ }
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hv_ringbuffer_read(struct vmbus_channel *channel,
+ void *buffer, u32 buflen, u32 *buffer_actual_len,
+ u64 *requestid, bool raw)
+{
+ struct vmpacket_descriptor *desc;
+ u32 packetlen, offset;
+
+ if (unlikely(buflen == 0))
+ return -EINVAL;
+
+ *buffer_actual_len = 0;
+ *requestid = 0;
+
+ /* Make sure there is something to read */
+ desc = hv_pkt_iter_first(channel);
+ if (desc == NULL) {
+ /*
+ * No error is returned when there is not even a header; drivers
+ * are expected to check buffer_actual_len instead.
+ */
+ return 0;
+ }
+
+ offset = raw ? 0 : (desc->offset8 << 3);
+ packetlen = (desc->len8 << 3) - offset;
+ *buffer_actual_len = packetlen;
+ *requestid = desc->trans_id;
+
+ if (unlikely(packetlen > buflen))
+ return -ENOBUFS;
+
+ /* since ring is double mapped, only one copy is necessary */
+ memcpy(buffer, (const char *)desc + offset, packetlen);
+
+ /* Advance ring index to next packet descriptor */
+ __hv_pkt_iter_next(channel, desc);
+
+ /* Notify host of update */
+ hv_pkt_iter_close(channel);
+
+ return 0;
+}
+
+/*
+ * Determine number of bytes available in ring buffer after
+ * the current iterator (priv_read_index) location.
+ *
+ * This is similar to hv_get_bytes_to_read but with private
+ * read index instead.
+ */
+static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
+{
+ u32 priv_read_loc = rbi->priv_read_index;
+ u32 write_loc;
+
+ /*
+ * The Hyper-V host writes the packet data, then uses
+ * store_release() to update the write_index. Use load_acquire()
+ * here to prevent loads of the packet data from being re-ordered
+ * before the read of the write_index and potentially getting
+ * stale data.
+ */
+ write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
+
+ if (write_loc >= priv_read_loc)
+ return write_loc - priv_read_loc;
+ else
+ return (rbi->ring_datasize - priv_read_loc) + write_loc;
+}
+
+/*
+ * Get first vmbus packet from ring buffer after read_index
+ *
+ * If ring buffer is empty, returns NULL and no other action needed.
+ */
+struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ struct vmpacket_descriptor *desc, *desc_copy;
+ u32 bytes_avail, pkt_len, pkt_offset;
+
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
+
+ bytes_avail = hv_pkt_iter_avail(rbi);
+ if (bytes_avail < sizeof(struct vmpacket_descriptor))
+ return NULL;
+ bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);
+
+ desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
+
+ /*
+ * Ensure the compiler does not use references to incoming Hyper-V values (which
+ * could change at any moment) when reading local variables later in the code
+ */
+ pkt_len = READ_ONCE(desc->len8) << 3;
+ pkt_offset = READ_ONCE(desc->offset8) << 3;
+
+ /*
+ * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
+ * rbi->pkt_buffer_size
+ */
+ if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
+ pkt_len = bytes_avail;
+
+ /*
+ * If pkt_offset is invalid, arbitrarily set it to
+ * the size of vmpacket_descriptor
+ */
+ if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
+ pkt_offset = sizeof(struct vmpacket_descriptor);
+
+ /* Copy the Hyper-V packet out of the ring buffer */
+ desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
+ memcpy(desc_copy, desc, pkt_len);
+
+ /*
+ * Hyper-V could still change len8 and offset8 after the earlier read.
+ * Ensure that desc_copy has legal values for len8 and offset8 that
+ * are consistent with the copy we just made
+ */
+ desc_copy->len8 = pkt_len >> 3;
+ desc_copy->offset8 = pkt_offset >> 3;
+
+ return desc_copy;
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
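+
+/*
+ * Editorial sketch (hypothetical helper, not part of the driver) of the
+ * "snapshot, then sanitize" pattern hv_pkt_iter_first() applies to fields
+ * living in memory shared with the host: read each field once, clamp it to
+ * a locally known bound, and only use the clamped private copy afterwards.
+ *
+ * static u32 sanitized_pkt_len(const struct vmpacket_descriptor *desc,
+ *                              u32 bytes_avail)
+ * {
+ *         u32 len = READ_ONCE(desc->len8) << 3;
+ *
+ *         if (len < sizeof(*desc) || len > bytes_avail)
+ *                 len = bytes_avail;
+ *         return len;
+ * }
+ */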
+
+/*
+ * Get next vmbus packet from ring buffer.
+ *
+ * Advances the current location (priv_read_index) and checks for more
+ * data. If the end of the ring buffer is reached, then return NULL.
+ */
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *desc)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 packetlen = desc->len8 << 3;
+ u32 dsize = rbi->ring_datasize;
+
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
+ /* bump offset to next potential packet */
+ rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+ if (rbi->priv_read_index >= dsize)
+ rbi->priv_read_index -= dsize;
+
+ /* more data? */
+ return hv_pkt_iter_first(channel);
+}
+EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+
+/* How many bytes were read in this iterator cycle */
+static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
+ u32 start_read_index)
+{
+ if (rbi->priv_read_index >= start_read_index)
+ return rbi->priv_read_index - start_read_index;
+ else
+ return rbi->ring_datasize - start_read_index +
+ rbi->priv_read_index;
+}
+
+/*
+ * Update host ring buffer after iterating over packets. If the host has
+ * stopped queuing new entries because it found the ring buffer full, and
+ * sufficient space is being freed up, signal the host. But be careful to
+ * only signal the host when necessary, both for performance reasons and
+ * because Hyper-V protects itself by throttling guests that signal
+ * inappropriately.
+ *
+ * Determining when to signal is tricky. There are three key data inputs
+ * that must be handled in this order to avoid race conditions:
+ *
+ * 1. Update the read_index
+ * 2. Read the pending_send_sz
+ * 3. Read the current write_index
+ *
+ * The interrupt_mask is not used to determine when to signal. The
+ * interrupt_mask is used only on the guest->host ring buffer when
+ * sending requests to the host. The host does not use it on the host->
+ * guest ring buffer to indicate whether it should be signaled.
+ */
+void hv_pkt_iter_close(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
+
+ /*
+ * Make sure all reads are done before we update the read index since
+ * the writer may start writing to the read area once the read index
+ * is updated.
+ */
+ virt_rmb();
+ start_read_index = rbi->ring_buffer->read_index;
+ rbi->ring_buffer->read_index = rbi->priv_read_index;
+
+ /*
+ * Older versions of Hyper-V (before WS2012 and Win8) do not
+ * implement pending_send_sz and simply poll if the host->guest
+ * ring buffer is full. No signaling is needed or expected.
+ */
+ if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
+ return;
+
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * If reading pending_send_sz were to be reordered and happen
+ * before we commit the new read_index, a race could occur. If the
+ * host were to set the pending_send_sz after we have sampled
+ * pending_send_sz, and the ring buffer blocks before we commit the
+ * read index, we could miss sending the interrupt. Issue a full
+ * memory barrier to address this.
+ */
+ virt_mb();
+
+ /*
+ * If the pending_send_sz is zero, then the ring buffer is not
+ * blocked and there is no need to signal. This is by far the
+ * most common case, so exit quickly for best performance.
+ */
+ pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+ if (!pending_sz)
+ return;
+
+ /*
+ * Ensure the read of write_index in hv_get_bytes_to_write()
+ * happens after the read of pending_send_sz.
+ */
+ virt_rmb();
+ curr_write_sz = hv_get_bytes_to_write(rbi);
+ bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
+
+ /*
+ * We want to signal the host only if we're transitioning
+ * from a "not enough free space" state to a "enough free
+ * space" state. For example, it's possible that this function
+ * could run and free up enough space to signal the host, and then
+ * run again and free up additional space before the host has a
+ * chance to clear the pending_send_sz. The 2nd invocation would
+ * be a null transition from "enough free space" to "enough free
+ * space", which doesn't warrant a signal.
+ *
+ * Exactly filling the ring buffer is treated as "not enough
+ * space". The ring buffer always must have at least one byte
+ * empty so the empty and full conditions are distinguishable.
+ * hv_get_bytes_to_write() doesn't fully tell the truth in
+ * this regard.
+ *
+ * So first check if we were in the "enough free space" state
+ * before we began the iteration. If so, the host was not
+ * blocked, and there's no need to signal.
+ */
+ if (curr_write_sz - bytes_read > pending_sz)
+ return;
+
+ /*
+ * Similarly, if the new state is "not enough space", then
+ * there's no need to signal.
+ */
+ if (curr_write_sz <= pending_sz)
+ return;
+
+ ++channel->intr_in_full;
+ vmbus_setevent(channel);
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
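+
+/*
+ * Worked example for the two checks above (editorial, with made-up
+ * numbers): suppose pending_send_sz = 1000. If the writable space was
+ * already 1500 bytes before this read pass (curr_write_sz - bytes_read >
+ * pending_sz), the host was never blocked and no signal is sent. If the
+ * writable space is still only 800 bytes after the pass (curr_write_sz <=
+ * pending_sz), the host is still blocked and signaling would be premature.
+ * Only a pass that crosses the 1000-byte threshold, e.g. from 600 to 1200
+ * writable bytes, results in vmbus_setevent().
+ */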
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
new file mode 100644
index 000000000..e9c3f1e82
--- /dev/null
+++ b/drivers/hv/vmbus_drv.c
@@ -0,0 +1,2828 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/sched/isolation.h>
+#include <linux/sched/task_stack.h>
+
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
+#include <linux/ptrace.h>
+#include <linux/screen_info.h>
+#include <linux/kdebug.h>
+#include <linux/efi.h>
+#include <linux/random.h>
+#include <linux/kernel.h>
+#include <linux/syscore_ops.h>
+#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
+#include <clocksource/hyperv_timer.h>
+#include "hyperv_vmbus.h"
+
+struct vmbus_dynid {
+ struct list_head node;
+ struct hv_vmbus_device_id id;
+};
+
+static struct acpi_device *hv_acpi_dev;
+
+static int hyperv_cpuhp_online;
+
+static void *hv_panic_page;
+
+static long __percpu *vmbus_evt;
+
+/* Values parsed from ACPI DSDT */
+int vmbus_irq;
+int vmbus_interrupt;
+
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+static int hyperv_report_reg(void)
+{
+ return !sysctl_record_panic_msg || !hv_panic_page;
+}
+
+static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
+ void *args)
+{
+ struct pt_regs *regs;
+
+ vmbus_initiate_unload(true);
+
+ /*
+ * Hyper-V should be notified only once about a panic. If hv_kmsg_dump()
+ * is going to report the kmsg data later, don't do the notification here.
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
+ && hyperv_report_reg()) {
+ regs = current_pt_regs();
+ hyperv_report_panic(regs, val, false);
+ }
+ return NOTIFY_DONE;
+}
+
+static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
+ void *args)
+{
+ struct die_args *die = args;
+ struct pt_regs *regs = die->regs;
+
+ /* Don't notify Hyper-V if the die event is other than oops */
+ if (val != DIE_OOPS)
+ return NOTIFY_DONE;
+
+ /*
+ * Hyper-V should be notified only once about a panic. If hv_kmsg_dump()
+ * is going to report the kmsg data later, don't do the notification here.
+ */
+ if (hyperv_report_reg())
+ hyperv_report_panic(regs, val, true);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyperv_die_block = {
+ .notifier_call = hyperv_die_event,
+};
+static struct notifier_block hyperv_panic_block = {
+ .notifier_call = hyperv_panic_event,
+};
+
+static const char *fb_mmio_name = "fb_range";
+static struct resource *fb_mmio;
+static struct resource *hyperv_mmio;
+static DEFINE_MUTEX(hyperv_mmio_lock);
+
+static int vmbus_exists(void)
+{
+ if (hv_acpi_dev == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static u8 channel_monitor_group(const struct vmbus_channel *channel)
+{
+ return (u8)channel->offermsg.monitorid / 32;
+}
+
+static u8 channel_monitor_offset(const struct vmbus_channel *channel)
+{
+ return (u8)channel->offermsg.monitorid % 32;
+}
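+
+/*
+ * Example (editorial): a channel with offermsg.monitorid == 75 falls into
+ * trigger group 75 / 32 = 2, at offset 75 % 32 = 11 within that group.
+ */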
+
+static u32 channel_pending(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+
+ return monitor_page->trigger_group[monitor_group].pending;
+}
+
+static u32 channel_latency(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+
+ return monitor_page->latency[monitor_group][monitor_offset];
+}
+
+static u32 channel_conn_id(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+
+ return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->state);
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t monitor_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+}
+static DEVICE_ATTR_RO(monitor_id);
+
+static ssize_t class_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_type);
+}
+static DEVICE_ATTR_RO(class_id);
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_instance);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+#ifdef CONFIG_NUMA
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+}
+static DEVICE_ATTR_RO(numa_node);
+#endif
+
+static ssize_t server_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_pending);
+
+static ssize_t client_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_pending);
+
+static ssize_t server_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_latency);
+
+static ssize_t client_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_latency);
+
+static ssize_t server_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_conn_id);
+
+static ssize_t client_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_conn_id);
+
+static ssize_t out_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+}
+static DEVICE_ATTR_RO(out_intr_mask);
+
+static ssize_t out_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", outbound.current_read_index);
+}
+static DEVICE_ATTR_RO(out_read_index);
+
+static ssize_t out_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", outbound.current_write_index);
+}
+static DEVICE_ATTR_RO(out_write_index);
+
+static ssize_t out_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+}
+static DEVICE_ATTR_RO(out_read_bytes_avail);
+
+static ssize_t out_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(out_write_bytes_avail);
+
+static ssize_t in_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+}
+static DEVICE_ATTR_RO(in_intr_mask);
+
+static ssize_t in_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", inbound.current_read_index);
+}
+static DEVICE_ATTR_RO(in_read_index);
+
+static ssize_t in_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", inbound.current_write_index);
+}
+static DEVICE_ATTR_RO(in_write_index);
+
+static ssize_t in_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+}
+static DEVICE_ATTR_RO(in_read_bytes_avail);
+
+static ssize_t in_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+ int ret;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(in_write_bytes_avail);
+
+static ssize_t channel_vp_mapping_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
+ int buf_size = PAGE_SIZE, n_written, tot_written;
+ struct list_head *cur;
+
+ if (!channel)
+ return -ENODEV;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ tot_written = snprintf(buf, buf_size, "%u:%u\n",
+ channel->offermsg.child_relid, channel->target_cpu);
+
+ list_for_each(cur, &channel->sc_list) {
+ if (tot_written >= buf_size - 1)
+ break;
+
+ cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
+ n_written = scnprintf(buf + tot_written,
+ buf_size - tot_written,
+ "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
+ tot_written += n_written;
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return tot_written;
+}
+static DEVICE_ATTR_RO(channel_vp_mapping);
+
+static ssize_t vendor_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ int ret;
+
+ ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
+static struct attribute *vmbus_dev_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_state.attr,
+ &dev_attr_monitor_id.attr,
+ &dev_attr_class_id.attr,
+ &dev_attr_device_id.attr,
+ &dev_attr_modalias.attr,
+#ifdef CONFIG_NUMA
+ &dev_attr_numa_node.attr,
+#endif
+ &dev_attr_server_monitor_pending.attr,
+ &dev_attr_client_monitor_pending.attr,
+ &dev_attr_server_monitor_latency.attr,
+ &dev_attr_client_monitor_latency.attr,
+ &dev_attr_server_monitor_conn_id.attr,
+ &dev_attr_client_monitor_conn_id.attr,
+ &dev_attr_out_intr_mask.attr,
+ &dev_attr_out_read_index.attr,
+ &dev_attr_out_write_index.attr,
+ &dev_attr_out_read_bytes_avail.attr,
+ &dev_attr_out_write_bytes_avail.attr,
+ &dev_attr_in_intr_mask.attr,
+ &dev_attr_in_read_index.attr,
+ &dev_attr_in_write_index.attr,
+ &dev_attr_in_read_bytes_avail.attr,
+ &dev_attr_in_write_bytes_avail.attr,
+ &dev_attr_channel_vp_mapping.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+/*
+ * Device-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ const struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!hv_dev->channel->offermsg.monitor_allocated &&
+ (attr == &dev_attr_monitor_id.attr ||
+ attr == &dev_attr_server_monitor_pending.attr ||
+ attr == &dev_attr_client_monitor_pending.attr ||
+ attr == &dev_attr_server_monitor_latency.attr ||
+ attr == &dev_attr_client_monitor_latency.attr ||
+ attr == &dev_attr_server_monitor_conn_id.attr ||
+ attr == &dev_attr_client_monitor_conn_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group vmbus_dev_group = {
+ .attrs = vmbus_dev_attrs,
+ .is_visible = vmbus_dev_attr_is_visible
+};
+__ATTRIBUTE_GROUPS(vmbus_dev);
+
+/* Set up the attribute for /sys/bus/vmbus/hibernation */
+static ssize_t hibernation_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
+}
+
+static BUS_ATTR_RO(hibernation);
+
+static struct attribute *vmbus_bus_attrs[] = {
+ &bus_attr_hibernation.attr,
+ NULL,
+};
+static const struct attribute_group vmbus_bus_group = {
+ .attrs = vmbus_bus_attrs,
+};
+__ATTRIBUTE_GROUPS(vmbus_bus);
+
+/*
+ * vmbus_uevent - add uevent for our device
+ *
+ * This routine is invoked when a device is added to or removed from the
+ * vmbus to generate a uevent for udev in userspace. udev then matches its
+ * rules against the uevent generated here to load the appropriate driver.
+ *
+ * The alias string is of the form vmbus:guid, where guid is the string
+ * representation of the device guid (each byte of the guid is represented
+ * with two hex characters).
+ */
+static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
+{
+ struct hv_device *dev = device_to_hv_device(device);
+ const char *format = "MODALIAS=vmbus:%*phN";
+
+ return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
+}
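+
+/*
+ * Example (editorial): the alias is "vmbus:" followed by the 16 bytes of
+ * dev_type rendered as 32 hex digits with no separators. Note that guid_t
+ * stores its first three fields in little-endian byte order, so the hex
+ * string does not read the same as the dashed textual form of the GUID.
+ */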
+
+static const struct hv_vmbus_device_id *
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
+{
+ if (id == NULL)
+ return NULL; /* empty device table */
+
+ for (; !guid_is_null(&id->guid); id++)
+ if (guid_equal(&id->guid, guid))
+ return id;
+
+ return NULL;
+}
+
+static const struct hv_vmbus_device_id *
+hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
+{
+ const struct hv_vmbus_device_id *id = NULL;
+ struct vmbus_dynid *dynid;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (guid_equal(&dynid->id.guid, guid)) {
+ id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return id;
+}
+
+static const struct hv_vmbus_device_id vmbus_device_null;
+
+/*
+ * Return a matching hv_vmbus_device_id pointer.
+ * If there is no match, return NULL.
+ */
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
+ struct hv_device *dev)
+{
+ const guid_t *guid = &dev->dev_type;
+ const struct hv_vmbus_device_id *id;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
+
+ /* Look at the dynamic ids first, before the static ones */
+ id = hv_vmbus_dynid_match(drv, guid);
+ if (!id)
+ id = hv_vmbus_dev_match(drv->id_table, guid);
+
+ /* driver_override will always match, send a dummy id */
+ if (!id && dev->driver_override)
+ id = &vmbus_device_null;
+
+ return id;
+}
+
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
+{
+ struct vmbus_dynid *dynid;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.guid = *guid;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ return driver_attach(&drv->driver);
+}
+
+static void vmbus_free_dynids(struct hv_driver *drv)
+{
+ struct vmbus_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/*
+ * store_new_id - sysfs frontend to vmbus_add_dynid()
+ *
+ * Allow GUIDs to be added to an existing driver via sysfs.
+ */
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ guid_t guid;
+ ssize_t retval;
+
+ retval = guid_parse(buf, &guid);
+ if (retval)
+ return retval;
+
+ if (hv_vmbus_dynid_match(drv, &guid))
+ return -EEXIST;
+
+ retval = vmbus_add_dynid(drv, &guid);
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR_WO(new_id);
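+
+/*
+ * Usage sketch (editorial; illustrative driver name and GUID): a GUID can
+ * be added to an already-loaded driver from userspace, e.g.
+ *
+ * echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > \
+ *     /sys/bus/vmbus/drivers/<driver>/new_id
+ *
+ * guid_parse() accepts the usual dashed form; a duplicate GUID is rejected
+ * with -EEXIST, and unbound devices are re-probed via driver_attach().
+ */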
+
+/*
+ * store_remove_id - remove a device ID from this driver
+ *
+ * Removes a dynamic VMBus device GUID from this driver.
+ */
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct vmbus_dynid *dynid, *n;
+ guid_t guid;
+ ssize_t retval;
+
+ retval = guid_parse(buf, &guid);
+ if (retval)
+ return retval;
+
+ retval = -ENODEV;
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ struct hv_vmbus_device_id *id = &dynid->id;
+
+ if (guid_equal(&id->guid, &guid)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = count;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return retval;
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *vmbus_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vmbus_drv);
+
+
+/*
+ * vmbus_match - Attempt to match the specified device to the specified driver
+ */
+static int vmbus_match(struct device *device, struct device_driver *driver)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct hv_device *hv_dev = device_to_hv_device(device);
+
+ /* The hv_sock driver handles all hv_sock offers. */
+ if (is_hvsock_channel(hv_dev->channel))
+ return drv->hvsock;
+
+ if (hv_vmbus_get_id(drv, hv_dev))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * vmbus_probe - Add the new vmbus's child device
+ */
+static int vmbus_probe(struct device *child_device)
+{
+ int ret = 0;
+ struct hv_driver *drv =
+ drv_to_hv_drv(child_device->driver);
+ struct hv_device *dev = device_to_hv_device(child_device);
+ const struct hv_vmbus_device_id *dev_id;
+
+ dev_id = hv_vmbus_get_id(drv, dev);
+ if (drv->probe) {
+ ret = drv->probe(dev, dev_id);
+ if (ret != 0)
+ pr_err("probe failed for device %s (%d)\n",
+ dev_name(child_device), ret);
+
+ } else {
+ pr_err("probe not set for driver %s\n",
+ dev_name(child_device));
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+/*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+ /*
+ * On ARM64, propagate the DMA coherence setting from the top level
+ * VMbus ACPI device to the child VMbus device being added here.
+ * On x86/x64 coherence is assumed and these calls have no effect.
+ */
+ hv_setup_dma_ops(child_device,
+ device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+ return 0;
+}
+
+/*
+ * vmbus_remove - Remove a vmbus device
+ */
+static void vmbus_remove(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ if (child_device->driver) {
+ drv = drv_to_hv_drv(child_device->driver);
+ if (drv->remove)
+ drv->remove(dev);
+ }
+}
+
+/*
+ * vmbus_shutdown - Shutdown a vmbus device
+ */
+static void vmbus_shutdown(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return;
+
+ drv = drv_to_hv_drv(child_device->driver);
+
+ if (drv->shutdown)
+ drv->shutdown(dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * vmbus_suspend - Suspend a vmbus device
+ */
+static int vmbus_suspend(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return 0;
+
+ drv = drv_to_hv_drv(child_device->driver);
+ if (!drv->suspend)
+ return -EOPNOTSUPP;
+
+ return drv->suspend(dev);
+}
+
+/*
+ * vmbus_resume - Resume a vmbus device
+ */
+static int vmbus_resume(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return 0;
+
+ drv = drv_to_hv_drv(child_device->driver);
+ if (!drv->resume)
+ return -EOPNOTSUPP;
+
+ return drv->resume(dev);
+}
+#else
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * vmbus_device_release - Final callback release of the vmbus child device
+ */
+static void vmbus_device_release(struct device *device)
+{
+ struct hv_device *hv_dev = device_to_hv_device(device);
+ struct vmbus_channel *channel = hv_dev->channel;
+
+ hv_debug_rm_dev_dir(hv_dev);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(channel);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ kfree(hv_dev);
+}
+
+/*
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
+ */
+
+static const struct dev_pm_ops vmbus_pm = {
+ .suspend_noirq = NULL,
+ .resume_noirq = NULL,
+ .freeze_noirq = vmbus_suspend,
+ .thaw_noirq = vmbus_resume,
+ .poweroff_noirq = vmbus_suspend,
+ .restore_noirq = vmbus_resume,
+};
+
+/* The one and only one */
+static struct bus_type hv_bus = {
+ .name = "vmbus",
+ .match = vmbus_match,
+ .shutdown = vmbus_shutdown,
+ .remove = vmbus_remove,
+ .probe = vmbus_probe,
+ .uevent = vmbus_uevent,
+ .dma_configure = vmbus_dma_configure,
+ .dev_groups = vmbus_dev_groups,
+ .drv_groups = vmbus_drv_groups,
+ .bus_groups = vmbus_bus_groups,
+ .pm = &vmbus_pm,
+};
+
+struct onmessage_work_context {
+ struct work_struct work;
+ struct {
+ struct hv_message_header header;
+ u8 payload[];
+ } msg;
+};
+
+static void vmbus_onmessage_work(struct work_struct *work)
+{
+ struct onmessage_work_context *ctx;
+
+ /* Do not process messages if we're in DISCONNECTED state */
+ if (vmbus_connection.conn_state == DISCONNECTED)
+ return;
+
+ ctx = container_of(work, struct onmessage_work_context,
+ work);
+ vmbus_onmessage((struct vmbus_channel_message_header *)
+ &ctx->msg.payload);
+ kfree(ctx);
+}
+
+void vmbus_on_msg_dpc(unsigned long data)
+{
+ struct hv_per_cpu_context *hv_cpu = (void *)data;
+ void *page_addr = hv_cpu->synic_message_page;
+ struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ enum vmbus_channel_message_type msgtype;
+ const struct vmbus_channel_message_table_entry *entry;
+ struct onmessage_work_context *ctx;
+ __u8 payload_size;
+ u32 message_type;
+
+ /*
+ * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
+ * it is being used in 'struct vmbus_channel_message_header' definition
+ * which is supposed to match hypervisor ABI.
+ */
+ BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
+
+ /*
+ * Since the message is in memory shared with the host, an erroneous or
+ * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
+ * or individual message handlers are executing; to prevent this, copy
+ * the message into private memory.
+ */
+ memcpy(&msg_copy, msg, sizeof(struct hv_message));
+
+ message_type = msg_copy.header.message_type;
+ if (message_type == HVMSG_NONE)
+ /* no msg */
+ return;
+
+ hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
+ msgtype = hdr->msgtype;
+
+ trace_vmbus_on_msg_dpc(hdr);
+
+ if (msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
+ goto msg_handled;
+ }
+
+ payload_size = msg_copy.header.payload_size;
+ if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
+ WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
+ goto msg_handled;
+ }
+
+ entry = &channel_message_table[msgtype];
+
+ if (!entry->message_handler)
+ goto msg_handled;
+
+ if (payload_size < entry->min_payload_len) {
+ WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
+ goto msg_handled;
+ }
+
+ if (entry->handler_type == VMHT_BLOCKING) {
+ ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
+ if (ctx == NULL)
+ return;
+
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+ ctx->msg.header = msg_copy.header;
+ memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
+
+ /*
+ * The host can generate a rescind message while we
+ * may still be handling the original offer. We deal with
+ * this condition by relying on the synchronization provided
+ * by offer_in_progress and by channel_mutex. See also the
+ * inline comments in vmbus_onoffer_rescind().
+ */
+ switch (msgtype) {
+ case CHANNELMSG_RESCIND_CHANNELOFFER:
+ /*
+ * If we are handling the rescind message,
+ * schedule the work on the global work queue.
+ *
+ * The OFFER message and the RESCIND message should
+ * not be handled by the same serialized work queue,
+ * because the OFFER handler may call vmbus_open(),
+ * which tries to open the channel by sending an
+ * OPEN_CHANNEL message to the host and waits for
+ * the host's response; however, if the host has
+ * rescinded the channel before it receives the
+ * OPEN_CHANNEL message, the host just silently
+ * ignores the OPEN_CHANNEL message; as a result,
+ * the guest's OFFER handler hangs forever if we
+ * handle the RESCIND message in the same serialized
+ * work queue: the RESCIND handler can not start to
+ * run before the OFFER handler finishes.
+ */
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
+ break;
+
+ case CHANNELMSG_OFFERCHANNEL:
+ /*
+ * The host sends the offer message of a given channel
+ * before sending the rescind message of the same
+ * channel. These messages are sent to the guest's
+ * connect CPU; the guest then starts processing them
+ * in the tasklet handler on this CPU:
+ *
+ * VMBUS_CONNECT_CPU
+ *
+ * [vmbus_on_msg_dpc()]
+ * atomic_inc() // CHANNELMSG_OFFERCHANNEL
+ * queue_work()
+ * ...
+ * [vmbus_on_msg_dpc()]
+ * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
+ *
+ * We rely on the memory-ordering properties of the
+ * queue_work() and schedule_work() primitives, which
+ * guarantee that the atomic increment will be visible
+ * to the CPUs which will execute the offer & rescind
+ * works by the time these works will start execution.
+ */
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ atomic_inc(&vmbus_connection.offer_in_progress);
+ fallthrough;
+
+ default:
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+ }
+ } else
+ entry->message_handler(hdr);
+
+msg_handled:
+ vmbus_signal_eom(msg, message_type);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
+ * hibernation, because hv_sock connections can not persist across hibernation.
+ */
+static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
+{
+ struct onmessage_work_context *ctx;
+ struct vmbus_channel_rescind_offer *rescind;
+
+ WARN_ON(!is_hvsock_channel(channel));
+
+ /*
+ * Allocation size is small and the allocation should really not fail,
+ * otherwise the state of the hv_sock connections ends up in limbo.
+ */
+ ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
+ GFP_KERNEL | __GFP_NOFAIL);
+
+ /*
+ * So far, these are not really used by Linux. Just set them to
+ * reasonable values conforming to the definitions of the fields.
+ */
+ ctx->msg.header.message_type = 1;
+ ctx->msg.header.payload_size = sizeof(*rescind);
+
+ /* These values are actually used by Linux. */
+ rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
+ rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
+ rescind->child_relid = channel->offermsg.child_relid;
+
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * Schedule all channels with events pending
+ */
+static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
+{
+ unsigned long *recv_int_page;
+ u32 maxbits, relid;
+
+ /*
+ * The event page can be directly checked to get the id of
+ * the channel that has the interrupt pending.
+ */
+ void *page_addr = hv_cpu->synic_event_page;
+ union hv_synic_event_flags *event
+ = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+
+ maxbits = HV_EVENT_FLAGS_COUNT;
+ recv_int_page = event->flags;
+
+ if (unlikely(!recv_int_page))
+ return;
+
+ for_each_set_bit(relid, recv_int_page, maxbits) {
+ void (*callback_fn)(void *context);
+ struct vmbus_channel *channel;
+
+ if (!sync_test_and_clear_bit(relid, recv_int_page))
+ continue;
+
+ /* Special case - vmbus channel protocol msg */
+ if (relid == 0)
+ continue;
+
+ /*
+ * Pairs with the kfree_rcu() in vmbus_chan_release().
+ * Guarantees that the channel data structure doesn't
+ * get freed while the channel pointer below is being
+ * dereferenced.
+ */
+ rcu_read_lock();
+
+ /* Find channel based on relid */
+ channel = relid2channel(relid);
+ if (channel == NULL)
+ goto sched_unlock_rcu;
+
+ if (channel->rescind)
+ goto sched_unlock_rcu;
+
+ /*
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
+ */
+ spin_lock(&channel->sched_lock);
+
+ callback_fn = channel->onchannel_callback;
+ if (unlikely(callback_fn == NULL))
+ goto sched_unlock;
+
+ trace_vmbus_chan_sched(channel);
+
+ ++channel->interrupts;
+
+ switch (channel->callback_mode) {
+ case HV_CALL_ISR:
+ (*callback_fn)(channel->channel_callback_context);
+ break;
+
+ case HV_CALL_BATCHED:
+ hv_begin_read(&channel->inbound);
+ fallthrough;
+ case HV_CALL_DIRECT:
+ tasklet_schedule(&channel->callback_event);
+ }
+
+sched_unlock:
+ spin_unlock(&channel->sched_lock);
+sched_unlock_rcu:
+ rcu_read_unlock();
+ }
+}
+
+static void vmbus_isr(void)
+{
+ struct hv_per_cpu_context *hv_cpu
+ = this_cpu_ptr(hv_context.cpu_context);
+ void *page_addr;
+ struct hv_message *msg;
+
+ vmbus_chan_sched(hv_cpu);
+
+ page_addr = hv_cpu->synic_message_page;
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Check if there are actual msgs to be processed */
+ if (msg->header.message_type != HVMSG_NONE) {
+ if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
+ hv_stimer0_isr();
+ vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
+ } else
+ tasklet_schedule(&hv_cpu->msg_dpc);
+ }
+
+ add_interrupt_randomness(vmbus_interrupt);
+}
+
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+ vmbus_isr();
+ return IRQ_HANDLED;
+}
+
+/*
+ * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
+ * buffer and call into Hyper-V to transfer the data.
+ */
+static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct kmsg_dump_iter iter;
+ size_t bytes_written;
+
+ /* We are only interested in panics. */
+ if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
+ return;
+
+ /*
+ * Write dump contents to the page. No need to synchronize; panic should
+ * be single-threaded.
+ */
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+ &bytes_written);
+ if (!bytes_written)
+ return;
+ /*
+ * P3 to contain the physical address of the panic page & P4 to
+ * contain the size of the panic data in that page. Rest of the
+ * registers are no-op when the NOTIFY_MSG flag is set.
+ */
+ hv_set_register(HV_REGISTER_CRASH_P0, 0);
+ hv_set_register(HV_REGISTER_CRASH_P1, 0);
+ hv_set_register(HV_REGISTER_CRASH_P2, 0);
+ hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+
+ /*
+ * Let Hyper-V know there is crash data available along with
+ * the panic message.
+ */
+ hv_set_register(HV_REGISTER_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
+}
+
+static struct kmsg_dumper hv_kmsg_dumper = {
+ .dump = hv_kmsg_dump,
+};
+
+static void hv_kmsg_dump_register(void)
+{
+ int ret;
+
+ hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ if (!hv_panic_page) {
+ pr_err("Hyper-V: panic message page memory allocation failed\n");
+ return;
+ }
+
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret) {
+ pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
+ hv_free_hyperv_page((unsigned long)hv_panic_page);
+ hv_panic_page = NULL;
+ }
+}
+
+static struct ctl_table_header *hv_ctl_table_hdr;
+
+/*
+ * sysctl option to allow the user to control whether kmsg data should be
+ * reported to Hyper-V on panic.
+ */
+static struct ctl_table hv_ctl_table[] = {
+ {
+ .procname = "hyperv_record_panic_msg",
+ .data = &sysctl_record_panic_msg,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
+ {}
+};
+
+static struct ctl_table hv_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = hv_ctl_table
+ },
+ {}
+};
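+
+/*
+ * Userspace sketch (path assumed from the two tables above): the knob is
+ * exposed as /proc/sys/kernel/hyperv_record_panic_msg and, because of
+ * proc_dointvec_minmax with SYSCTL_ZERO/SYSCTL_ONE bounds, accepts only
+ * 0 or 1:
+ *
+ *        #include <stdio.h>
+ *
+ *        static int hv_set_record_panic_msg(int enable)
+ *        {
+ *                FILE *f = fopen("/proc/sys/kernel/hyperv_record_panic_msg", "w");
+ *
+ *                if (!f)
+ *                        return -1;
+ *                fprintf(f, "%d\n", enable ? 1 : 0);
+ *                return fclose(f);
+ *        }
+ *
+ * fclose() returns non-zero if the kernel rejected the value.
+ */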
+
+/*
+ * vmbus_bus_init - Main vmbus driver initialization routine.
+ *
+ * Here, we
+ * - initialize the vmbus driver context
+ * - invoke the vmbus hv main init routine
+ * - retrieve the channel offers
+ */
+static int vmbus_bus_init(void)
+{
+ int ret;
+
+ ret = hv_init();
+ if (ret != 0) {
+ pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
+ return ret;
+ }
+
+ ret = bus_register(&hv_bus);
+ if (ret)
+ return ret;
+
+ /*
+ * VMbus interrupts are best modeled as per-cpu interrupts. If
+ * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+ * allocate a per-cpu IRQ using standard Linux kernel functionality.
+ * If not on such an architecture (e.g., x86/x64), then rely on
+ * code in the arch-specific portion of the code tree to connect
+ * the VMbus interrupt handler.
+ */
+
+ if (vmbus_irq == -1) {
+ hv_setup_vmbus_handler(vmbus_isr);
+ } else {
+ vmbus_evt = alloc_percpu(long);
+ ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+ "Hyper-V VMbus", vmbus_evt);
+ if (ret) {
+ pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
+ vmbus_irq, ret);
+ free_percpu(vmbus_evt);
+ goto err_setup;
+ }
+ }
+
+ ret = hv_synic_alloc();
+ if (ret)
+ goto err_alloc;
+
+ /*
+ * Initialize the per-cpu interrupt state and stimer state.
+ * Then connect to the host.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
+ hv_synic_init, hv_synic_cleanup);
+ if (ret < 0)
+ goto err_alloc;
+ hyperv_cpuhp_online = ret;
+
+ ret = vmbus_connect();
+ if (ret)
+ goto err_connect;
+
+ if (hv_is_isolation_supported())
+ sysctl_record_panic_msg = 0;
+
+ /*
+ * Only register if the crash MSRs are available
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ u64 hyperv_crash_ctl;
+ /*
+ * Panic message recording (sysctl_record_panic_msg)
+ * is enabled by default in non-isolated guests and
+ * disabled by default in isolated guests; the panic
+ * message recording won't be available in isolated
+ * guests should the following registration fail.
+ */
+ hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
+ if (!hv_ctl_table_hdr)
+ pr_err("Hyper-V: sysctl table register error");
+
+ /*
+ * Register for panic kmsg callback only if the right
+ * capability is supported by the hypervisor.
+ */
+ hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
+ hv_kmsg_dump_register();
+
+ register_die_notifier(&hyperv_die_block);
+ }
+
+ /*
+ * Always register the panic notifier because we need to unload
+ * the VMbus channel connection to prevent any VMbus
+ * activity after the VM panics.
+ */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &hyperv_panic_block);
+
+ vmbus_request_offers();
+
+ return 0;
+
+err_connect:
+ cpuhp_remove_state(hyperv_cpuhp_online);
+err_alloc:
+ hv_synic_free();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
+err_setup:
+ bus_unregister(&hv_bus);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
+ return ret;
+}
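+
+/*
+ * Sketch of the per-cpu IRQ pattern used above (assumed, simplified flow;
+ * hypothetical names -- in this driver the matching per-CPU enable happens
+ * in the SynIC init path, not in vmbus_bus_init()):
+ */
+static long __percpu *example_evt;
+
+static int example_setup_percpu_irq(int irq, irq_handler_t handler)
+{
+        int ret;
+
+        example_evt = alloc_percpu(long);
+        if (!example_evt)
+                return -ENOMEM;
+
+        ret = request_percpu_irq(irq, handler, "example-percpu", example_evt);
+        if (ret) {
+                free_percpu(example_evt);
+                return ret;
+        }
+
+        /*
+         * Each CPU then enables its own copy, typically from a cpuhp
+         * callback: enable_percpu_irq(irq, IRQ_TYPE_NONE);
+         */
+        return 0;
+}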
+
+/**
+ * __vmbus_driver_register() - Register a vmbus driver
+ * @hv_driver: Pointer to driver structure you want to register
+ * @owner: owner module of the drv
+ * @mod_name: module name string
+ *
+ * Registers the given driver with Linux through the 'driver_register()' call
+ * and sets up the Hyper-V vmbus handling for this driver.
+ * It returns the result of the 'driver_register()' call.
+ *
+ */
+int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ pr_info("registering driver %s\n", hv_driver->name);
+
+ ret = vmbus_exists();
+ if (ret < 0)
+ return ret;
+
+ hv_driver->driver.name = hv_driver->name;
+ hv_driver->driver.owner = owner;
+ hv_driver->driver.mod_name = mod_name;
+ hv_driver->driver.bus = &hv_bus;
+
+ spin_lock_init(&hv_driver->dynids.lock);
+ INIT_LIST_HEAD(&hv_driver->dynids.list);
+
+ ret = driver_register(&hv_driver->driver);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__vmbus_driver_register);
+
+/**
+ * vmbus_driver_unregister() - Unregister a vmbus driver
+ * @hv_driver: Pointer to driver structure you want to
+ * unregister
+ *
+ * Unregister the given driver that was previously registered with a call to
+ * vmbus_driver_register()
+ */
+void vmbus_driver_unregister(struct hv_driver *hv_driver)
+{
+ pr_info("unregistering driver %s\n", hv_driver->name);
+
+ if (!vmbus_exists()) {
+ driver_unregister(&hv_driver->driver);
+ vmbus_free_dynids(hv_driver);
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
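+
+/*
+ * Minimal sketch of a client driver using the register/unregister calls
+ * exported here (hypothetical names; a real id_table carries the GUID of
+ * the offered device class, and vmbus_driver_register() is the wrapper
+ * macro that passes THIS_MODULE and KBUILD_MODNAME to
+ * __vmbus_driver_register()):
+ */
+static int example_probe(struct hv_device *dev,
+                         const struct hv_vmbus_device_id *dev_id)
+{
+        dev_info(&dev->device, "example vmbus device bound\n");
+        return 0;
+}
+
+static int example_remove(struct hv_device *dev)
+{
+        return 0;
+}
+
+static const struct hv_vmbus_device_id example_id_table[] = {
+        /* { GUID of the offered device class goes here }, */
+        { },
+};
+
+static struct hv_driver example_vmbus_drv = {
+        .name     = "example_vmbus_client",
+        .id_table = example_id_table,
+        .probe    = example_probe,
+        .remove   = example_remove,
+};
+
+/* module_init body: return vmbus_driver_register(&example_vmbus_drv); */
+/* module_exit body: vmbus_driver_unregister(&example_vmbus_drv); */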
+
+
+/*
+ * Called when last reference to channel is gone.
+ */
+static void vmbus_chan_release(struct kobject *kobj)
+{
+ struct vmbus_channel *channel
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ kfree_rcu(channel, rcu);
+}
+
+struct vmbus_chan_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct vmbus_channel *chan, char *buf);
+ ssize_t (*store)(struct vmbus_channel *chan,
+ const char *buf, size_t count);
+};
+#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
+ struct vmbus_chan_attribute chan_attr_##_name \
+ = __ATTR(_name, _mode, _show, _store)
+#define VMBUS_CHAN_ATTR_RW(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
+#define VMBUS_CHAN_ATTR_RO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
+#define VMBUS_CHAN_ATTR_WO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
+
+static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(chan, buf);
+}
+
+static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(chan, buf, count);
+}
+
+static const struct sysfs_ops vmbus_chan_sysfs_ops = {
+ .show = vmbus_chan_attr_show,
+ .store = vmbus_chan_attr_store,
+};
+
+static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
+{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
+
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
+}
+static VMBUS_CHAN_ATTR_RO(out_mask);
+
+static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
+
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
+}
+static VMBUS_CHAN_ATTR_RO(in_mask);
+
+static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
+
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
+}
+static VMBUS_CHAN_ATTR_RO(read_avail);
+
+static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
+{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
+
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
+}
+static VMBUS_CHAN_ATTR_RO(write_avail);
+
+static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%u\n", channel->target_cpu);
+}
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu, origin_cpu;
+ ssize_t ret = count;
+
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
+ return -EIO;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ /* Validate target_cpu for the cpumask_test_cpu() operation below. */
+ if (target_cpu >= nr_cpumask_bits)
+ return -EINVAL;
+
+ if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
+ return -EINVAL;
+
+ /* No CPUs should come up or down during this. */
+ cpus_read_lock();
+
+ if (!cpu_online(target_cpu)) {
+ cpus_read_unlock();
+ return -EINVAL;
+ }
+
+ /*
+ * Synchronizes target_cpu_store() and channel closure:
+ *
+ * { Initially: state = CHANNEL_OPENED }
+ *
+ * CPU1 CPU2
+ *
+ * [target_cpu_store()] [vmbus_disconnect_ring()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * LOAD r1 = state LOAD r2 = state
+ * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
+ * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
+ * [...] SEND CLOSECHANNEL
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
+ * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
+ *
+ * Note. The host processes the channel messages "sequentially", in
+ * the order in which they are received on a per-partition basis.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
+ * avoid sending the message and fail here for such channels.
+ */
+ if (channel->state != CHANNEL_OPENED_STATE) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ origin_cpu = channel->target_cpu;
+ if (target_cpu == origin_cpu)
+ goto cpu_store_unlock;
+
+ if (vmbus_send_modifychannel(channel,
+ hv_cpu_number_to_vp_number(target_cpu))) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ /*
+ * For version before VERSION_WIN10_V5_3, the following warning holds:
+ *
+ * Warning. At this point, there is *no* guarantee that the host will
+ * have successfully processed the vmbus_send_modifychannel() request.
+ * See the header comment of vmbus_send_modifychannel() for more info.
+ *
+ * Lags in the processing of the above vmbus_send_modifychannel() can
+ * result in missed interrupts if the "old" target CPU is taken offline
+ * before Hyper-V starts sending interrupts to the "new" target CPU.
+ * But apart from this offlining scenario, the code tolerates such
+ * lags. It will function correctly even if a channel interrupt comes
+ * in on a CPU that is different from the channel target_cpu value.
+ */
+
+ channel->target_cpu = target_cpu;
+
+ /* See init_vp_index(). */
+ if (hv_is_perf_channel(channel))
+ hv_update_allocated_cpus(origin_cpu, target_cpu);
+
+ /* Currently set only for storvsc channels. */
+ if (channel->change_target_cpu_callback) {
+ (*channel->change_target_cpu_callback)(channel,
+ origin_cpu, target_cpu);
+ }
+
+cpu_store_unlock:
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
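+
+/*
+ * Userspace sketch of driving the "cpu" attribute defined above. The sysfs
+ * layout is assumed from vmbus_device_register()/vmbus_add_channel_kobj()
+ * below: /sys/bus/vmbus/devices/<device>/channels/<relid>/cpu.
+ *
+ *        #include <stdio.h>
+ *
+ *        static int vmbus_chan_set_cpu(const char *chan_dir, unsigned int cpu)
+ *        {
+ *                char path[256];
+ *                FILE *f;
+ *
+ *                snprintf(path, sizeof(path), "%s/cpu", chan_dir);
+ *                f = fopen(path, "w");
+ *                if (!f)
+ *                        return -1;
+ *                fprintf(f, "%u\n", cpu);
+ *                return fclose(f);
+ *        }
+ *
+ * fclose() returns non-zero if the store was rejected, e.g. for a channel
+ * that is not in the CHANNEL_OPENED_STATE or for an offline target CPU.
+ */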
+
+static ssize_t channel_pending_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_pending(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
+
+static ssize_t channel_latency_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_latency(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
+
+static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->interrupts);
+}
+static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
+
+static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->sig_events);
+}
+static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
+
+static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_in_full);
+}
+static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
+
+static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_out_empty);
+}
+static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
+
+static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_first);
+}
+static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
+
+static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_total);
+}
+static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
+
+static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", channel->offermsg.monitorid);
+}
+static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
+
+static ssize_t subchannel_id_show(struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ channel->offermsg.offer.sub_channel_index);
+}
+static VMBUS_CHAN_ATTR_RO(subchannel_id);
+
+static struct attribute *vmbus_chan_attrs[] = {
+ &chan_attr_out_mask.attr,
+ &chan_attr_in_mask.attr,
+ &chan_attr_read_avail.attr,
+ &chan_attr_write_avail.attr,
+ &chan_attr_cpu.attr,
+ &chan_attr_pending.attr,
+ &chan_attr_latency.attr,
+ &chan_attr_interrupts.attr,
+ &chan_attr_events.attr,
+ &chan_attr_intr_in_full.attr,
+ &chan_attr_intr_out_empty.attr,
+ &chan_attr_out_full_first.attr,
+ &chan_attr_out_full_total.attr,
+ &chan_attr_monitor_id.attr,
+ &chan_attr_subchannel_id.attr,
+ NULL
+};
+
+/*
+ * Channel-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!channel->offermsg.monitor_allocated &&
+ (attr == &chan_attr_pending.attr ||
+ attr == &chan_attr_latency.attr ||
+ attr == &chan_attr_monitor_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group vmbus_chan_group = {
+ .attrs = vmbus_chan_attrs,
+ .is_visible = vmbus_chan_attr_is_visible
+};
+
+static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+};
+
+/*
+ * vmbus_add_channel_kobj - setup a sub-directory under device/channels
+ */
+int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+{
+ const struct device *device = &dev->device;
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+ if (ret) {
+ kobject_put(kobj);
+ return ret;
+ }
+
+ ret = sysfs_create_group(kobj, &vmbus_chan_group);
+
+ if (ret) {
+ /*
+ * The calling functions' error handling paths will cleanup the
+ * empty channel directory.
+ */
+ kobject_put(kobj);
+ dev_err(device, "Unable to set up channel sysfs files\n");
+ return ret;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+}
+
+/*
+ * vmbus_remove_channel_attr_group - remove the channel's attribute group
+ */
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
+{
+ sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
+}
+
+/*
+ * vmbus_device_create - Creates and registers a new child device
+ * on the vmbus.
+ */
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
+ struct vmbus_channel *channel)
+{
+ struct hv_device *child_device_obj;
+
+ child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
+ if (!child_device_obj) {
+ pr_err("Unable to allocate device object for child device\n");
+ return NULL;
+ }
+
+ child_device_obj->channel = channel;
+ guid_copy(&child_device_obj->dev_type, type);
+ guid_copy(&child_device_obj->dev_instance, instance);
+ child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
+
+ return child_device_obj;
+}
+
+/*
+ * vmbus_device_register - Register the child device
+ */
+int vmbus_device_register(struct hv_device *child_device_obj)
+{
+ struct kobject *kobj = &child_device_obj->device.kobj;
+ int ret;
+
+ dev_set_name(&child_device_obj->device, "%pUl",
+ &child_device_obj->channel->offermsg.offer.if_instance);
+
+ child_device_obj->device.bus = &hv_bus;
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
+ child_device_obj->device.release = vmbus_device_release;
+
+ child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
+ /*
+ * Register with the LDM. This will kick off the driver/device
+ * binding...which will eventually call vmbus_match() and vmbus_probe()
+ */
+ ret = device_register(&child_device_obj->device);
+ if (ret) {
+ pr_err("Unable to register child device\n");
+ put_device(&child_device_obj->device);
+ return ret;
+ }
+
+ child_device_obj->channels_kset = kset_create_and_add("channels",
+ NULL, kobj);
+ if (!child_device_obj->channels_kset) {
+ ret = -ENOMEM;
+ goto err_dev_unregister;
+ }
+
+ ret = vmbus_add_channel_kobj(child_device_obj,
+ child_device_obj->channel);
+ if (ret) {
+ pr_err("Unable to register primary channeln");
+ goto err_kset_unregister;
+ }
+ hv_debug_add_dev_dir(child_device_obj);
+
+ return 0;
+
+err_kset_unregister:
+ kset_unregister(child_device_obj->channels_kset);
+
+err_dev_unregister:
+ device_unregister(&child_device_obj->device);
+ return ret;
+}
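+
+/*
+ * Sketch of how the two calls above pair up on the channel offer path
+ * (simplified, hypothetical wrapper; in this driver the actual caller
+ * lives in channel_mgmt.c):
+ */
+static int example_offer_to_device(struct vmbus_channel *newchannel)
+{
+        struct hv_device *dev;
+
+        dev = vmbus_device_create(&newchannel->offermsg.offer.if_type,
+                                  &newchannel->offermsg.offer.if_instance,
+                                  newchannel);
+        if (!dev)
+                return -ENOMEM;
+
+        /* Registers with the LDM and creates the per-device "channels" kset. */
+        return vmbus_device_register(dev);
+}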
+
+/*
+ * vmbus_device_unregister - Remove the specified child device
+ * from the vmbus.
+ */
+void vmbus_device_unregister(struct hv_device *device_obj)
+{
+ pr_debug("child device %s unregistered\n",
+ dev_name(&device_obj->device));
+
+ kset_unregister(device_obj->channels_kset);
+
+ /*
+ * Kick off the process of unregistering the device.
+ * This will call vmbus_remove() and eventually vmbus_device_release()
+ */
+ device_unregister(&device_obj->device);
+}
+
+
+/*
+ * VMBUS is an acpi enumerated device. Get the information we
+ * need from DSDT.
+ */
+#define VTPM_BASE_ADDRESS 0xfed40000
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
+{
+ resource_size_t start = 0;
+ resource_size_t end = 0;
+ struct resource *new_res;
+ struct resource **old_res = &hyperv_mmio;
+ struct resource **prev_res = NULL;
+ struct resource r;
+
+ switch (res->type) {
+
+ /*
+ * "Address" descriptors are for bus windows. Ignore
+ * "memory" descriptors, which are for registers on
+ * devices.
+ */
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ start = res->data.address32.address.minimum;
+ end = res->data.address32.address.maximum;
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ start = res->data.address64.address.minimum;
+ end = res->data.address64.address.maximum;
+ break;
+
+ /*
+ * The IRQ information is needed only on ARM64, which Hyper-V
+ * sets up in the extended format. IRQ information is present
+ * on x86/x64 in the non-extended format but it is not used by
+ * Linux. So don't bother checking for the non-extended format.
+ */
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ if (!acpi_dev_resource_interrupt(res, 0, &r)) {
+ pr_err("Unable to parse Hyper-V ACPI interrupt\n");
+ return AE_ERROR;
+ }
+ /* ARM64 INTID for VMbus */
+ vmbus_interrupt = res->data.extended_irq.interrupts[0];
+ /* Linux IRQ number */
+ vmbus_irq = r.start;
+ return AE_OK;
+
+ default:
+ /* Unused resource type */
+ return AE_OK;
+
+ }
+ /*
+ * Ignore ranges that are below 1MB, as they're not
+ * necessary or useful here.
+ */
+ if (end < 0x100000)
+ return AE_OK;
+
+ new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
+ if (!new_res)
+ return AE_NO_MEMORY;
+
+ /* If this range overlaps the virtual TPM, truncate it. */
+ if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
+ end = VTPM_BASE_ADDRESS;
+
+ new_res->name = "hyperv mmio";
+ new_res->flags = IORESOURCE_MEM;
+ new_res->start = start;
+ new_res->end = end;
+
+ /*
+ * If two ranges are adjacent, merge them.
+ */
+ do {
+ if (!*old_res) {
+ *old_res = new_res;
+ break;
+ }
+
+ if (((*old_res)->end + 1) == new_res->start) {
+ (*old_res)->end = new_res->end;
+ kfree(new_res);
+ break;
+ }
+
+ if ((*old_res)->start == new_res->end + 1) {
+ (*old_res)->start = new_res->start;
+ kfree(new_res);
+ break;
+ }
+
+ if ((*old_res)->start > new_res->end) {
+ new_res->sibling = *old_res;
+ if (prev_res)
+ (*prev_res)->sibling = new_res;
+ *old_res = new_res;
+ break;
+ }
+
+ prev_res = old_res;
+ old_res = &(*old_res)->sibling;
+
+ } while (1);
+
+ return AE_OK;
+}
+
+static int vmbus_acpi_remove(struct acpi_device *device)
+{
+ struct resource *cur_res;
+ struct resource *next_res;
+
+ if (hyperv_mmio) {
+ if (fb_mmio) {
+ __release_region(hyperv_mmio, fb_mmio->start,
+ resource_size(fb_mmio));
+ fb_mmio = NULL;
+ }
+
+ for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
+ next_res = cur_res->sibling;
+ kfree(cur_res);
+ }
+ }
+
+ return 0;
+}
+
+static void vmbus_reserve_fb(void)
+{
+ resource_size_t start = 0, size;
+ struct pci_dev *pdev;
+
+ if (efi_enabled(EFI_BOOT)) {
+ /* Gen2 VM: get FB base from EFI framebuffer */
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ } else {
+ /* Gen1 VM: get FB base from PCI */
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev)
+ return;
+
+ if (pdev->resource[0].flags & IORESOURCE_MEM) {
+ start = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ }
+
+ /*
+ * Release the PCI device so hyperv_drm or hyperv_fb driver can
+ * grab it later.
+ */
+ pci_dev_put(pdev);
+ }
+
+ if (!start)
+ return;
+
+ /*
+ * Make a claim for the frame buffer in the resource tree under the
+ * first node, which will be the one below 4GB. The length seems to
+ * be underreported, particularly in a Generation 1 VM. So start out
+ * reserving a larger area and make it smaller until it succeeds.
+ */
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+ fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
+}
+
+/**
+ * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
+ * @new: If successful, supplies a pointer to the
+ * allocated MMIO space.
+ * @device_obj: Identifies the caller
+ * @min: Minimum guest physical address of the
+ * allocation
+ * @max: Maximum guest physical address
+ * @size: Size of the range to be allocated
+ * @align: Alignment of the range to be allocated
+ * @fb_overlap_ok: Whether this allocation can be allowed
+ * to overlap the video frame buffer.
+ *
+ * This function walks the resources granted to VMBus by the
+ * _CRS object in the ACPI namespace underneath the parent
+ * "bridge" whether that's a root PCI bus in the Generation 1
+ * case or a Module Device in the Generation 2 case. It then
+ * attempts to allocate from the global MMIO pool in a way that
+ * matches the constraints supplied in these parameters and by
+ * that _CRS.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ resource_size_t min, resource_size_t max,
+ resource_size_t size, resource_size_t align,
+ bool fb_overlap_ok)
+{
+ struct resource *iter, *shadow;
+ resource_size_t range_min, range_max, start, end;
+ const char *dev_n = dev_name(&device_obj->device);
+ int retval;
+
+ retval = -ENXIO;
+ mutex_lock(&hyperv_mmio_lock);
+
+ /*
+ * If overlaps with frame buffers are allowed, then first attempt to
+ * make the allocation from within the reserved region. Because it
+ * is already reserved, no shadow allocation is necessary.
+ */
+ if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
+ !(max < fb_mmio->start)) {
+
+ range_min = fb_mmio->start;
+ range_max = fb_mmio->end;
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
+ }
+ }
+
+ for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ if ((iter->start >= max) || (iter->end <= min))
+ continue;
+
+ range_min = iter->start;
+ range_max = iter->end;
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
+ shadow = __request_region(iter, start, size, NULL,
+ IORESOURCE_BUSY);
+ if (!shadow)
+ continue;
+
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ shadow->name = (char *)*new;
+ retval = 0;
+ goto exit;
+ }
+
+ __release_region(iter, start, size);
+ }
+ }
+
+exit:
+ mutex_unlock(&hyperv_mmio_lock);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
+
+/**
+ * vmbus_free_mmio() - Free a memory-mapped I/O range.
+ * @start: Base address of region to release.
+ * @size: Size of the range to be allocated
+ *
+ * This function releases anything requested by
+ * vmbus_allocate_mmio().
+ */
+void vmbus_free_mmio(resource_size_t start, resource_size_t size)
+{
+ struct resource *iter;
+
+ mutex_lock(&hyperv_mmio_lock);
+ for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ if ((iter->start >= start + size) || (iter->end <= start))
+ continue;
+
+ __release_region(iter, start, size);
+ }
+ release_mem_region(start, size);
+ mutex_unlock(&hyperv_mmio_lock);
+
+}
+EXPORT_SYMBOL_GPL(vmbus_free_mmio);
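+
+/*
+ * Sketch of a caller pairing vmbus_allocate_mmio() with vmbus_free_mmio()
+ * (hypothetical size and constraints; real users include the Hyper-V
+ * framebuffer and PCI pass-through drivers):
+ */
+static int example_claim_mmio(struct hv_device *hdev, void __iomem **vaddr,
+                              struct resource **res)
+{
+        int ret;
+
+        /* 1 MiB anywhere in guest physical address space, page aligned. */
+        ret = vmbus_allocate_mmio(res, hdev, 0, -1, 0x100000, PAGE_SIZE, false);
+        if (ret)
+                return ret;
+
+        *vaddr = ioremap((*res)->start, resource_size(*res));
+        if (!*vaddr) {
+                vmbus_free_mmio((*res)->start, resource_size(*res));
+                return -ENOMEM;
+        }
+        return 0;
+}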
+
+static int vmbus_acpi_add(struct acpi_device *device)
+{
+ acpi_status result;
+ int ret_val = -ENODEV;
+ struct acpi_device *ancestor;
+
+ hv_acpi_dev = device;
+
+ /*
+ * Older versions of Hyper-V for ARM64 fail to include the _CCA
+ * method on the top level VMbus device in the DSDT. But devices
+ * are hardware coherent in all current Hyper-V use cases, so fix
+ * up the ACPI device to behave as if _CCA is present and indicates
+ * hardware coherence.
+ */
+ ACPI_COMPANION_SET(&device->dev, device);
+ if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+ device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+ pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+ device->flags.cca_seen = true;
+ device->flags.coherent_dma = true;
+ }
+
+ result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
+
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ /*
+ * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+ * firmware) is the VMOD that has the mmio ranges. Get that.
+ */
+ for (ancestor = acpi_dev_parent(device);
+ ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
+ ancestor = acpi_dev_parent(ancestor)) {
+ result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
+
+ if (ACPI_FAILURE(result))
+ continue;
+ if (hyperv_mmio) {
+ vmbus_reserve_fb();
+ break;
+ }
+ }
+ ret_val = 0;
+
+acpi_walk_err:
+ if (ret_val)
+ vmbus_acpi_remove(device);
+ return ret_val;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vmbus_bus_suspend(struct device *dev)
+{
+ struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
+ hv_context.cpu_context, VMBUS_CONNECT_CPU);
+ struct vmbus_channel *channel, *sc;
+
+ tasklet_disable(&hv_cpu->msg_dpc);
+ vmbus_connection.ignore_any_offer_msg = true;
+ /* The tasklet_enable() takes care of providing a memory barrier */
+ tasklet_enable(&hv_cpu->msg_dpc);
+
+ /* Drain all the workqueues as we are in suspend */
+ drain_workqueue(vmbus_connection.rescind_work_queue);
+ drain_workqueue(vmbus_connection.work_queue);
+ drain_workqueue(vmbus_connection.handle_primary_chan_wq);
+ drain_workqueue(vmbus_connection.handle_sub_chan_wq);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!is_hvsock_channel(channel))
+ continue;
+
+ vmbus_force_channel_rescinded(channel);
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Wait until all the sub-channels and hv_sock channels have been
+ * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
+ * they would conflict with the new sub-channels that will be created
+ * in the resume path. hv_sock channels should also be destroyed, but
+ * a hv_sock channel of an established hv_sock connection cannot really
+ * be destroyed since it may still be referenced by the userspace
+ * application, so we just force the hv_sock channel to be rescinded
+ * by vmbus_force_channel_rescinded(), and the userspace application
+ * will thoroughly destroy the channel after hibernation.
+ *
+ * Note: the counter nr_chan_close_on_suspend may never go above 0 if
+ * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
+ */
+ if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
+ wait_for_completion(&vmbus_connection.ready_for_suspend_event);
+
+ if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+ pr_err("Can not suspend due to a previous failed resuming\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ /*
+ * Remove the channel from the array of channels and invalidate
+ * the channel's relid. Upon resume, vmbus_onoffer() will fix
+ * up the relid (and other fields, if necessary) and add the
+ * channel back to the array.
+ */
+ vmbus_channel_unmap_relid(channel);
+ channel->offermsg.child_relid = INVALID_RELID;
+
+ if (is_hvsock_channel(channel)) {
+ if (!channel->rescind) {
+ pr_err("hv_sock channel not rescinded!\n");
+ WARN_ON_ONCE(1);
+ }
+ continue;
+ }
+
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ pr_err("Sub-channel not deleted!\n");
+ WARN_ON_ONCE(1);
+ }
+
+ atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ vmbus_initiate_unload(false);
+
+ /* Reset the event for the next resume. */
+ reinit_completion(&vmbus_connection.ready_for_resume_event);
+
+ return 0;
+}
+
+static int vmbus_bus_resume(struct device *dev)
+{
+ struct vmbus_channel_msginfo *msginfo;
+ size_t msgsize;
+ int ret;
+
+ vmbus_connection.ignore_any_offer_msg = false;
+
+ /*
+ * We only use the 'vmbus_proto_version', which was in use before
+ * hibernation, to re-negotiate with the host.
+ */
+ if (!vmbus_proto_version) {
+ pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
+ return -EINVAL;
+ }
+
+ msgsize = sizeof(*msginfo) +
+ sizeof(struct vmbus_channel_initiate_contact);
+
+ msginfo = kzalloc(msgsize, GFP_KERNEL);
+
+ if (msginfo == NULL)
+ return -ENOMEM;
+
+ ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
+
+ kfree(msginfo);
+
+ if (ret != 0)
+ return ret;
+
+ WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
+
+ vmbus_request_offers();
+
+ if (wait_for_completion_timeout(
+ &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+ pr_err("Some vmbus device is missing after suspending?\n");
+
+ /* Reset the event for the next suspend. */
+ reinit_completion(&vmbus_connection.ready_for_suspend_event);
+
+ return 0;
+}
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct acpi_device_id vmbus_acpi_device_ids[] = {
+ {"VMBUS", 0},
+ {"VMBus", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
+
+/*
+ * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
+ * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
+ * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
+ */
+
+static const struct dev_pm_ops vmbus_bus_pm = {
+ .suspend_noirq = NULL,
+ .resume_noirq = NULL,
+ .freeze_noirq = vmbus_bus_suspend,
+ .thaw_noirq = vmbus_bus_resume,
+ .poweroff_noirq = vmbus_bus_suspend,
+ .restore_noirq = vmbus_bus_resume
+};
+
+static struct acpi_driver vmbus_acpi_driver = {
+ .name = "vmbus",
+ .ids = vmbus_acpi_device_ids,
+ .ops = {
+ .add = vmbus_acpi_add,
+ .remove = vmbus_acpi_remove,
+ },
+ .drv.pm = &vmbus_bus_pm,
+ .drv.probe_type = PROBE_FORCE_SYNCHRONOUS,
+};
+
+static void hv_kexec_handler(void)
+{
+ hv_stimer_global_cleanup();
+ vmbus_initiate_unload(false);
+ /* Make sure conn_state is set as hv_synic_cleanup checks for it */
+ mb();
+ cpuhp_remove_state(hyperv_cpuhp_online);
+};
+
+static void hv_crash_handler(struct pt_regs *regs)
+{
+ int cpu;
+
+ vmbus_initiate_unload(true);
+ /*
+ * In crash handler we can't schedule synic cleanup for all CPUs,
+ * doing the cleanup for current CPU only. This should be sufficient
+ * for kdump.
+ */
+ cpu = smp_processor_id();
+ hv_stimer_cleanup(cpu);
+ hv_synic_disable_regs(cpu);
+};
+
+static int hv_synic_suspend(void)
+{
+ /*
+ * When we reach here, all the non-boot CPUs have been offlined.
+ * If we're in a legacy configuration where stimer Direct Mode is
+ * not enabled, the stimers on the non-boot CPUs have been unbound
+ * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
+ * hv_stimer_cleanup() -> clockevents_unbind_device().
+ *
+ * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
+ * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
+ * 1) it's unnecessary as interrupts remain disabled between
+ * syscore_suspend() and syscore_resume(): see create_image() and
+ * resume_target_kernel()
+ * 2) the stimer on CPU0 is automatically disabled later by
+ * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
+ * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
+ * 3) a warning would be triggered if we call
+ * clockevents_unbind_device(), which may sleep, in an
+ * interrupts-disabled context.
+ */
+
+ hv_synic_disable_regs(0);
+
+ return 0;
+}
+
+static void hv_synic_resume(void)
+{
+ hv_synic_enable_regs(0);
+
+ /*
+ * Note: we don't need to call hv_stimer_init(0), because the timer
+ * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
+ * automatically re-enabled in timekeeping_resume().
+ */
+}
+
+/* The callbacks run only on CPU0, with irqs_disabled. */
+static struct syscore_ops hv_synic_syscore_ops = {
+ .suspend = hv_synic_suspend,
+ .resume = hv_synic_resume,
+};
+
+static int __init hv_acpi_init(void)
+{
+ int ret;
+
+ if (!hv_is_hyperv_initialized())
+ return -ENODEV;
+
+ if (hv_root_partition)
+ return 0;
+
+ /*
+ * Get ACPI resources first.
+ */
+ ret = acpi_bus_register_driver(&vmbus_acpi_driver);
+
+ if (ret)
+ return ret;
+
+ if (!hv_acpi_dev) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ /*
+ * If we're on an architecture with a hardcoded hypervisor
+ * vector (i.e. x86/x64), override the VMbus interrupt found
+ * in the ACPI tables. Ensure vmbus_irq is not set since the
+ * normal Linux IRQ mechanism is not used in this case.
+ */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+ vmbus_irq = -1;
+#endif
+
+ hv_debug_init();
+
+ ret = vmbus_bus_init();
+ if (ret)
+ goto cleanup;
+
+ hv_setup_kexec_handler(hv_kexec_handler);
+ hv_setup_crash_handler(hv_crash_handler);
+
+ register_syscore_ops(&hv_synic_syscore_ops);
+
+ return 0;
+
+cleanup:
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ hv_acpi_dev = NULL;
+ return ret;
+}
+
+static void __exit vmbus_exit(void)
+{
+ int cpu;
+
+ unregister_syscore_ops(&hv_synic_syscore_ops);
+
+ hv_remove_kexec_handler();
+ hv_remove_crash_handler();
+ vmbus_connection.conn_state = DISCONNECTED;
+ hv_stimer_global_cleanup();
+ vmbus_disconnect();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
+ for_each_online_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ tasklet_kill(&hv_cpu->msg_dpc);
+ }
+ hv_debug_rm_all_dir();
+
+ vmbus_free_channels();
+ kfree(vmbus_connection.channels);
+
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ kmsg_dump_unregister(&hv_kmsg_dumper);
+ unregister_die_notifier(&hyperv_die_block);
+ }
+
+ /*
+ * The panic notifier is always registered, hence we should
+ * also unconditionally unregister it here as well.
+ */
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &hyperv_panic_block);
+
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
+ bus_unregister(&hv_bus);
+
+ cpuhp_remove_state(hyperv_cpuhp_online);
+ hv_synic_free();
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
+
+subsys_initcall(hv_acpi_init);
+module_exit(vmbus_exit);