path: root/src/VBox/Devices/VMMDev
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:17:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:17:27 +0000
commit     f215e02bf85f68d3a6106c2a1f4f7f063f819064 (patch)
tree       6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/Devices/VMMDev
parent     Initial commit. (diff)
download   virtualbox-f215e02bf85f68d3a6106c2a1f4f7f063f819064.tar.xz
           virtualbox-f215e02bf85f68d3a6106c2a1f4f7f063f819064.zip
Adding upstream version 7.0.14-dfsg. (upstream/7.0.14-dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/Devices/VMMDev')
-rw-r--r--  src/VBox/Devices/VMMDev/Makefile.kup          0
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDev.cpp         5227
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevHGCM.cpp     2776
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevHGCM.h         62
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevState.h       589
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevTesting.cpp  1111
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevTesting.h      46
7 files changed, 9811 insertions, 0 deletions
diff --git a/src/VBox/Devices/VMMDev/Makefile.kup b/src/VBox/Devices/VMMDev/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/Makefile.kup
diff --git a/src/VBox/Devices/VMMDev/VMMDev.cpp b/src/VBox/Devices/VMMDev/VMMDev.cpp
new file mode 100644
index 00000000..341ae439
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDev.cpp
@@ -0,0 +1,5227 @@
+/* $Id: VMMDev.cpp $ */
+/** @file
+ * VMMDev - Guest <-> VMM/Host communication device.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/** @page pg_vmmdev The VMM Device.
+ *
+ * The VMM device is a custom hardware device emulation for communicating with
+ * the guest additions.
+ *
+ * Whenever the host wants to inform the guest about something, an IRQ
+ * notification is raised.
+ *
+ * The VMMDev PDM interface contains the guest notification method.
+ *
+ * There is a 32-bit event mask which the guest reads on an interrupt. A set
+ * bit in the mask means that the specific event occurred and requires
+ * processing on the guest side.
+ *
+ * After reading the event mask, the guest must issue the generic request
+ * AcknowledgeEvents.
+ *
+ * The IRQ line is set to 1 (request) if there are unprocessed events, that
+ * is, the event mask is not zero.
+ *
+ * After receiving an interrupt and checking the event mask, the guest must
+ * process events using the event specific mechanism.
+ *
+ * For example, if the mouse capabilities were changed, the guest will issue
+ * the VMMDev_GetMouseStatus generic request.
+ *
+ * The event mask is only a set of flags indicating that the guest must
+ * proceed with a procedure.
+ *
+ * Unsupported events are therefore ignored. The Guest Additions must inform
+ * the host which events they want to receive, to avoid unnecessary IRQ
+ * processing. By default no events are signalled to the guest.
+ *
+ * This seems to be a fast method: it requires only one context switch per
+ * event processed. (An illustrative guest-side sketch follows this comment.)
+ *
+ *
+ * @section sec_vmmdev_heartbeat Heartbeat
+ *
+ * The heartbeat is a feature to monitor whether the guest OS is hung or not.
+ *
+ * The main kernel component of the guest additions, VBoxGuest, sets up a timer
+ * at a frequency returned by VMMDevReq_HeartbeatConfigure
+ * (VMMDevReqHeartbeat::cNsInterval, VMMDEV::cNsHeartbeatInterval) and performs
+ * a VMMDevReq_GuestHeartbeat request every time the timer ticks.
+ *
+ * The host side (VMMDev) arms a timer with a more distant deadline
+ * (VMMDEV::cNsHeartbeatTimeout), twice cNsHeartbeatInterval by default. Each
+ * time a VMMDevReq_GuestHeartbeat request comes in, the timer is rearmed with
+ * the same relative deadline. So, as long as VMMDevReq_GuestHeartbeat
+ * requests arrive when they should, the host timer will never fire.
+ *
+ * When the timer fires, we consider the guest as hung / flatlined / dead.
+ * Currently we only LogRel that, but it would be easy to extend this with an
+ * event in the Main API.
+ *
+ * Should the guest reawaken at some later point, we LogRel that event and
+ * continue as normal. Again something which would merit an API event.
+ *
+ */
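+
+/*
+ * Illustrative guest-side sketch of the event flow described above. This is
+ * hypothetical pseudocode, not part of the device implementation; a real
+ * guest driver (VBoxGuest) issues proper VMMDev requests with locking:
+ *
+ *     on VMMDev IRQ:
+ *         fEvents = submit VMMDevReq_AcknowledgeEvents;  // read + clear pending events
+ *         if (fEvents & VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED)
+ *             submit VMMDevReq_GetMouseStatus;           // event specific follow-up
+ *         // ...handle the remaining set bits the guest subscribed to...
+ */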
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+/* Enable dev_vmm Log3 statements to get IRQ-related logging. */
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <VBox/AssertGuest.h>
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <iprt/path.h>
+#include <iprt/dir.h>
+#include <iprt/file.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/err.h>
+#include <VBox/dbg.h>
+#include <VBox/version.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h> /* ASMReadTsc */
+#endif
+#include <iprt/assert.h>
+#include <iprt/buildconfig.h>
+#include <iprt/string.h>
+#include <iprt/system.h>
+#include <iprt/time.h>
+#ifndef IN_RC
+# include <iprt/mem.h>
+# include <iprt/memsafer.h>
+#endif
+#ifdef IN_RING3
+# include <iprt/uuid.h>
+#endif
+
+#include "VMMDevState.h"
+#ifdef VBOX_WITH_HGCM
+# include "VMMDevHGCM.h"
+#endif
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+# include "VMMDevTesting.h"
+#endif
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define VMMDEV_INTERFACE_VERSION_IS_1_03(s) \
+ ( RT_HIWORD((s)->guestInfo.interfaceVersion) == 1 \
+ && RT_LOWORD((s)->guestInfo.interfaceVersion) == 3 )
+
+#define VMMDEV_INTERFACE_VERSION_IS_OK(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) <= RT_LOWORD(VMMDEV_VERSION) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_OLD(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) < RT_HIWORD(VMMDEV_VERSION) \
+ || ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) <= RT_LOWORD(VMMDEV_VERSION) ) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_TOO_OLD(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) < RT_HIWORD(VMMDEV_VERSION) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_NEW(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) > RT_HIWORD(VMMDEV_VERSION) \
+ || ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) > RT_LOWORD(VMMDEV_VERSION) ) )
+
+/** Default interval in nanoseconds between guest heartbeats.
+ * Used when no HeartbeatInterval is set in CFGM, and for setting the
+ * heartbeat check timer if the guest's heartbeat frequency is less than 1 Hz. */
+#define VMMDEV_HEARTBEAT_DEFAULT_INTERVAL (2U*RT_NS_1SEC_64)
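+/* Illustrative arithmetic: with this default 2 s interval, the flatline
+ * timeout described in @ref sec_vmmdev_heartbeat defaults to twice the
+ * interval, i.e. 4 s. */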
+
+
+#ifndef VBOX_DEVICE_STRUCT_TESTCASE
+#ifdef IN_RING3
+
+/** DISPLAYCHANGEDATA field descriptors for the v18+ saved state. */
+static SSMFIELD const g_aSSMDISPLAYCHANGEDATAStateFields[] =
+{
+ SSMFIELD_ENTRY(DISPLAYCHANGEDATA, iCurrentMonitor),
+ SSMFIELD_ENTRY(DISPLAYCHANGEDATA, fGuestSentChangeEventAck),
+ SSMFIELD_ENTRY(DISPLAYCHANGEDATA, afAlignment),
+ SSMFIELD_ENTRY(DISPLAYCHANGEDATA, aRequests),
+ SSMFIELD_ENTRY_TERM()
+};
+
+/* -=-=-=-=- Misc Helpers -=-=-=-=- */
+
+/**
+ * Log information about the Guest Additions.
+ *
+ * @param pGuestInfo The information we've got from the Guest Additions driver.
+ */
+static void vmmdevLogGuestOsInfo(VBoxGuestInfo *pGuestInfo)
+{
+ const char *pszOs;
+ switch (pGuestInfo->osType & ~VBOXOSTYPE_x64)
+ {
+ case VBOXOSTYPE_DOS: pszOs = "DOS"; break;
+ case VBOXOSTYPE_Win31: pszOs = "Windows 3.1"; break;
+ case VBOXOSTYPE_Win9x: pszOs = "Windows 9x"; break;
+ case VBOXOSTYPE_Win95: pszOs = "Windows 95"; break;
+ case VBOXOSTYPE_Win98: pszOs = "Windows 98"; break;
+ case VBOXOSTYPE_WinMe: pszOs = "Windows Me"; break;
+ case VBOXOSTYPE_WinNT: pszOs = "Windows NT"; break;
+ case VBOXOSTYPE_WinNT3x: pszOs = "Windows NT 3.x"; break;
+ case VBOXOSTYPE_WinNT4: pszOs = "Windows NT4"; break;
+ case VBOXOSTYPE_Win2k: pszOs = "Windows 2k"; break;
+ case VBOXOSTYPE_WinXP: pszOs = "Windows XP"; break;
+ case VBOXOSTYPE_Win2k3: pszOs = "Windows 2k3"; break;
+ case VBOXOSTYPE_WinVista: pszOs = "Windows Vista"; break;
+ case VBOXOSTYPE_Win2k8: pszOs = "Windows 2k8"; break;
+ case VBOXOSTYPE_Win7: pszOs = "Windows 7"; break;
+ case VBOXOSTYPE_Win8: pszOs = "Windows 8"; break;
+ case VBOXOSTYPE_Win2k12_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 2k12"; break;
+ case VBOXOSTYPE_Win81: pszOs = "Windows 8.1"; break;
+ case VBOXOSTYPE_Win10: pszOs = "Windows 10"; break;
+ case VBOXOSTYPE_Win2k16_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 2k16"; break;
+ case VBOXOSTYPE_Win2k19_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 2k19"; break;
+ case VBOXOSTYPE_Win11_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 11"; break;
+ case VBOXOSTYPE_OS2: pszOs = "OS/2"; break;
+ case VBOXOSTYPE_OS2Warp3: pszOs = "OS/2 Warp 3"; break;
+ case VBOXOSTYPE_OS2Warp4: pszOs = "OS/2 Warp 4"; break;
+ case VBOXOSTYPE_OS2Warp45: pszOs = "OS/2 Warp 4.5"; break;
+ case VBOXOSTYPE_ECS: pszOs = "OS/2 ECS"; break;
+ case VBOXOSTYPE_ArcaOS: pszOs = "OS/2 ArcaOS"; break;
+ case VBOXOSTYPE_OS21x: pszOs = "OS/2 2.1x"; break;
+ case VBOXOSTYPE_Linux: pszOs = "Linux"; break;
+ case VBOXOSTYPE_Linux22: pszOs = "Linux 2.2"; break;
+ case VBOXOSTYPE_Linux24: pszOs = "Linux 2.4"; break;
+ case VBOXOSTYPE_Linux26: pszOs = "Linux >= 2.6"; break;
+ case VBOXOSTYPE_ArchLinux: pszOs = "ArchLinux"; break;
+ case VBOXOSTYPE_Debian: pszOs = "Debian"; break;
+ case VBOXOSTYPE_Debian31: pszOs = "Debian 3.1"; break;
+ case VBOXOSTYPE_Debian4: pszOs = "Debian 4.0"; break;
+ case VBOXOSTYPE_Debian5: pszOs = "Debian 5.0"; break;
+ case VBOXOSTYPE_Debian6: pszOs = "Debian 6.0"; break;
+ case VBOXOSTYPE_Debian7: pszOs = "Debian 7"; break;
+ case VBOXOSTYPE_Debian8: pszOs = "Debian 8"; break;
+ case VBOXOSTYPE_Debian9: pszOs = "Debian 9"; break;
+ case VBOXOSTYPE_Debian10: pszOs = "Debian 10"; break;
+ case VBOXOSTYPE_Debian11: pszOs = "Debian 11"; break;
+ case VBOXOSTYPE_Debian12: pszOs = "Debian 12"; break;
+ case VBOXOSTYPE_OpenSUSE: pszOs = "openSUSE"; break;
+ case VBOXOSTYPE_OpenSUSE_Leap_x64 & ~VBOXOSTYPE_x64: pszOs = "openSUSE Leap"; break;
+ case VBOXOSTYPE_OpenSUSE_Tumbleweed: pszOs = "openSUSE Tumbleweed"; break;
+ case VBOXOSTYPE_SUSE_LE: pszOs = "SUSE Linux Enterprise"; break;
+ case VBOXOSTYPE_FedoraCore: pszOs = "Fedora"; break;
+ case VBOXOSTYPE_Gentoo: pszOs = "Gentoo"; break;
+ case VBOXOSTYPE_Mandriva: pszOs = "Mandriva"; break;
+ case VBOXOSTYPE_OpenMandriva_Lx: pszOs = "OpenMandriva Lx"; break;
+ case VBOXOSTYPE_PCLinuxOS: pszOs = "PCLinuxOS"; break;
+ case VBOXOSTYPE_Mageia: pszOs = "Mageia"; break;
+ case VBOXOSTYPE_RedHat: pszOs = "Red Hat"; break;
+ case VBOXOSTYPE_RedHat3: pszOs = "Red Hat 3"; break;
+ case VBOXOSTYPE_RedHat4: pszOs = "Red Hat 4"; break;
+ case VBOXOSTYPE_RedHat5: pszOs = "Red Hat 5"; break;
+ case VBOXOSTYPE_RedHat6: pszOs = "Red Hat 6"; break;
+ case VBOXOSTYPE_RedHat7_x64 & ~VBOXOSTYPE_x64: pszOs = "Red Hat 7"; break;
+ case VBOXOSTYPE_RedHat8_x64 & ~VBOXOSTYPE_x64: pszOs = "Red Hat 8"; break;
+ case VBOXOSTYPE_RedHat9_x64 & ~VBOXOSTYPE_x64: pszOs = "Red Hat 9"; break;
+ case VBOXOSTYPE_Turbolinux: pszOs = "TurboLinux"; break;
+ case VBOXOSTYPE_Ubuntu: pszOs = "Ubuntu"; break;
+ case VBOXOSTYPE_Ubuntu10_LTS: pszOs = "Ubuntu 10.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu10: pszOs = "Ubuntu 10.10"; break;
+ case VBOXOSTYPE_Ubuntu11: pszOs = "Ubuntu 11.x"; break;
+ case VBOXOSTYPE_Ubuntu12_LTS: pszOs = "Ubuntu 12.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu12: pszOs = "Ubuntu 12.10"; break;
+ case VBOXOSTYPE_Ubuntu13: pszOs = "Ubuntu 13.x"; break;
+ case VBOXOSTYPE_Ubuntu14_LTS: pszOs = "Ubuntu 14.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu14: pszOs = "Ubuntu 14.10"; break;
+ case VBOXOSTYPE_Ubuntu15: pszOs = "Ubuntu 15.x"; break;
+ case VBOXOSTYPE_Ubuntu16_LTS: pszOs = "Ubuntu 16.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu16: pszOs = "Ubuntu 16.10"; break;
+ case VBOXOSTYPE_Ubuntu17: pszOs = "Ubuntu 17.x"; break;
+ case VBOXOSTYPE_Ubuntu18_LTS: pszOs = "Ubuntu 18.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu18: pszOs = "Ubuntu 18.10"; break;
+ case VBOXOSTYPE_Ubuntu19: pszOs = "Ubuntu 19.x"; break;
+ case VBOXOSTYPE_Ubuntu20_LTS_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 20.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu20_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 20.10"; break;
+ case VBOXOSTYPE_Ubuntu21_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 21.x"; break;
+ case VBOXOSTYPE_Ubuntu22_LTS_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 22.04 LTS"; break;
+ case VBOXOSTYPE_Ubuntu22_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 22.10"; break;
+ case VBOXOSTYPE_Ubuntu23_x64 & ~VBOXOSTYPE_x64: pszOs = "Ubuntu 23.04"; break;
+ case VBOXOSTYPE_Lubuntu: pszOs = "Lubuntu"; break;
+ case VBOXOSTYPE_Xubuntu: pszOs = "Xubuntu"; break;
+ case VBOXOSTYPE_Xandros: pszOs = "Xandros"; break;
+ case VBOXOSTYPE_Oracle: pszOs = "Oracle Linux"; break;
+ case VBOXOSTYPE_Oracle4: pszOs = "Oracle Linux 4"; break;
+ case VBOXOSTYPE_Oracle5: pszOs = "Oracle Linux 5"; break;
+ case VBOXOSTYPE_Oracle6: pszOs = "Oracle Linux 6"; break;
+ case VBOXOSTYPE_Oracle7_x64 & ~VBOXOSTYPE_x64: pszOs = "Oracle Linux 7"; break;
+ case VBOXOSTYPE_Oracle8_x64 & ~VBOXOSTYPE_x64: pszOs = "Oracle Linux 8"; break;
+ case VBOXOSTYPE_Oracle9_x64 & ~VBOXOSTYPE_x64: pszOs = "Oracle Linux 9"; break;
+ case VBOXOSTYPE_FreeBSD: pszOs = "FreeBSD"; break;
+ case VBOXOSTYPE_OpenBSD: pszOs = "OpenBSD"; break;
+ case VBOXOSTYPE_NetBSD: pszOs = "NetBSD"; break;
+ case VBOXOSTYPE_Netware: pszOs = "Netware"; break;
+ case VBOXOSTYPE_Solaris: pszOs = "Solaris"; break;
+ case VBOXOSTYPE_Solaris10U8_or_later: pszOs = "Solaris 10"; break;
+ case VBOXOSTYPE_OpenSolaris: pszOs = "OpenSolaris"; break;
+ case VBOXOSTYPE_Solaris11_x64 & ~VBOXOSTYPE_x64: pszOs = "Solaris 11"; break;
+ case VBOXOSTYPE_MacOS: pszOs = "Mac OS X"; break;
+ case VBOXOSTYPE_MacOS106: pszOs = "Mac OS X 10.6"; break;
+ case VBOXOSTYPE_MacOS107_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.7"; break;
+ case VBOXOSTYPE_MacOS108_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.8"; break;
+ case VBOXOSTYPE_MacOS109_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.9"; break;
+ case VBOXOSTYPE_MacOS1010_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.10"; break;
+ case VBOXOSTYPE_MacOS1011_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.11"; break;
+ case VBOXOSTYPE_MacOS1012_x64 & ~VBOXOSTYPE_x64: pszOs = "macOS 10.12"; break;
+ case VBOXOSTYPE_MacOS1013_x64 & ~VBOXOSTYPE_x64: pszOs = "macOS 10.13"; break;
+ case VBOXOSTYPE_Haiku: pszOs = "Haiku"; break;
+ case VBOXOSTYPE_VBoxBS_x64 & ~VBOXOSTYPE_x64: pszOs = "VBox Bootsector"; break;
+ default: pszOs = "unknown"; break;
+ }
+ LogRel(("VMMDev: Guest Additions information report: Interface = 0x%08X osType = 0x%08X (%s, %u-bit)\n",
+ pGuestInfo->interfaceVersion, pGuestInfo->osType, pszOs,
+ pGuestInfo->osType & VBOXOSTYPE_x64 ? 64 : 32));
+}
+
+
+/**
+ * Sets the IRQ (raise it or lower it) for 1.03 additions.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevSetIRQ_Legacy(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
+{
+ if (pThis->fu32AdditionsOk)
+ {
+ /* Filter unsupported events */
+ uint32_t fEvents = pThis->fHostEventFlags & pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask;
+
+ Log(("vmmdevSetIRQ: fEvents=%#010x, fHostEventFlags=%#010x, u32GuestEventMask=%#010x.\n",
+ fEvents, pThis->fHostEventFlags, pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask));
+
+ /* Move event flags to VMMDev RAM */
+ pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32HostEvents = fEvents;
+
+ uint32_t uIRQLevel = 0;
+ if (fEvents)
+ {
+ /* Clear host flags which will be delivered to guest. */
+ pThis->fHostEventFlags &= ~fEvents;
+ Log(("vmmdevSetIRQ: fHostEventFlags=%#010x\n", pThis->fHostEventFlags));
+ uIRQLevel = 1;
+ }
+
+ /* Set IRQ level for pin 0 (see NoWait comment in vmmdevMaybeSetIRQ). */
+ /** @todo make IRQ pin configurable, at least a symbolic constant */
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, uIRQLevel);
+ Log(("vmmdevSetIRQ: IRQ set %d\n", uIRQLevel));
+ }
+ else
+ Log(("vmmdevSetIRQ: IRQ is not generated, guest has not yet reported to us.\n"));
+}
+
+
+/**
+ * Sets the IRQ if there are events to be delivered.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevMaybeSetIRQ(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
+{
+ Log3(("vmmdevMaybeSetIRQ: fHostEventFlags=%#010x, fGuestFilterMask=%#010x.\n",
+ pThis->fHostEventFlags, pThis->fGuestFilterMask));
+
+ if (pThis->fHostEventFlags & pThis->fGuestFilterMask)
+ {
+ /*
+ * Note! No need to wait for the IRQs to be set (if we're not lucky
+ * with the locks, etc). It is a notification about something,
+ * which has already happened.
+ */
+ pThisCC->pVMMDevRAMR3->V.V1_04.fHaveEvents = true;
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 1);
+ Log3(("vmmdevMaybeSetIRQ: IRQ set.\n"));
+ }
+}
+
+/**
+ * Notifies the guest about new events (@a fAddEvents).
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param fAddEvents New events to add.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevNotifyGuestWorker(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t fAddEvents)
+{
+ Log3(("vmmdevNotifyGuestWorker: fAddEvents=%#010x.\n", fAddEvents));
+ Assert(PDMDevHlpCritSectIsOwner(pDevIns, &pThis->CritSect));
+
+ if (!VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ Log3(("vmmdevNotifyGuestWorker: New additions detected.\n"));
+
+ if (pThis->fu32AdditionsOk)
+ {
+ const bool fHadEvents = (pThis->fHostEventFlags & pThis->fGuestFilterMask) != 0;
+
+ Log3(("vmmdevNotifyGuestWorker: fHadEvents=%d, fHostEventFlags=%#010x, fGuestFilterMask=%#010x.\n",
+ fHadEvents, pThis->fHostEventFlags, pThis->fGuestFilterMask));
+
+ pThis->fHostEventFlags |= fAddEvents;
+
+ if (!fHadEvents)
+ vmmdevMaybeSetIRQ(pDevIns, pThis, pThisCC);
+ }
+ else
+ {
+ pThis->fHostEventFlags |= fAddEvents;
+ Log(("vmmdevNotifyGuestWorker: IRQ is not generated, guest has not yet reported to us.\n"));
+ }
+ }
+ else
+ {
+ Log3(("vmmdevNotifyGuestWorker: Old additions detected.\n"));
+
+ pThis->fHostEventFlags |= fAddEvents;
+ vmmdevSetIRQ_Legacy(pDevIns, pThis, pThisCC);
+ }
+}
+
+
+
+/* -=-=-=-=- Interfaces shared with VMMDevHGCM.cpp -=-=-=-=- */
+
+/**
+ * Notifies the guest about new events (@a fAddEvents).
+ *
+ * This is used by VMMDev.cpp as well as VMMDevHGCM.cpp.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param fAddEvents New events to add.
+ * @thread Any.
+ */
+void VMMDevNotifyGuest(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t fAddEvents)
+{
+ Log3(("VMMDevNotifyGuest: fAddEvents=%#010x\n", fAddEvents));
+
+ /*
+ * Only notify the VM when it's running.
+ */
+ VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
+ if ( enmVMState == VMSTATE_RUNNING
+ || enmVMState == VMSTATE_RUNNING_LS
+ || enmVMState == VMSTATE_LOADING
+ || enmVMState == VMSTATE_RESUMING
+ || enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_DEBUGGING
+ || enmVMState == VMSTATE_DEBUGGING_LS
+ )
+ {
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
+
+ vmmdevNotifyGuestWorker(pDevIns, pThis, pThisCC, fAddEvents);
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ }
+ else
+ LogRel(("VMMDevNotifyGuest: fAddEvents=%#x ignored because enmVMState=%d\n", fAddEvents, enmVMState));
+}
+
+/**
+ * Code shared by VMMDevReq_CtlGuestFilterMask and HGCM for controlling the
+ * events the guest is interested in.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param fOrMask Events to add (VMMDEV_EVENT_XXX). Pass 0 for no
+ * change.
+ * @param fNotMask Events to remove (VMMDEV_EVENT_XXX). Pass 0 for no
+ * change.
+ *
+ * @remarks HGCM will automatically enable VMMDEV_EVENT_HGCM when the guest
+ * starts submitting HGCM requests. Otherwise, the events are
+ * controlled by the guest.
+ */
+void VMMDevCtlSetGuestFilterMask(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t fOrMask, uint32_t fNotMask)
+{
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
+
+ const bool fHadEvents = (pThis->fHostEventFlags & pThis->fGuestFilterMask) != 0;
+
+ Log(("VMMDevCtlSetGuestFilterMask: fOrMask=%#010x, u32NotMask=%#010x, fHadEvents=%d.\n", fOrMask, fNotMask, fHadEvents));
+ if (fHadEvents)
+ {
+ if (!pThis->fNewGuestFilterMaskValid)
+ pThis->fNewGuestFilterMask = pThis->fGuestFilterMask;
+
+ pThis->fNewGuestFilterMask |= fOrMask;
+ pThis->fNewGuestFilterMask &= ~fNotMask;
+ pThis->fNewGuestFilterMaskValid = true;
+ }
+ else
+ {
+ pThis->fGuestFilterMask |= fOrMask;
+ pThis->fGuestFilterMask &= ~fNotMask;
+ vmmdevMaybeSetIRQ(pDevIns, pThis, pThisCC);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+}
+
+
+
+/* -=-=-=-=- Request processing functions. -=-=-=-=- */
+
+/**
+ * Handles VMMDevReq_ReportGuestInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pRequestHeader The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestInfo(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ VMMDevRequestHeader *pRequestHeader)
+{
+ AssertMsgReturn(pRequestHeader->size == sizeof(VMMDevReportGuestInfo), ("%u\n", pRequestHeader->size), VERR_INVALID_PARAMETER);
+ VBoxGuestInfo const *pInfo = &((VMMDevReportGuestInfo *)pRequestHeader)->guestInfo;
+
+ if (memcmp(&pThis->guestInfo, pInfo, sizeof(*pInfo)) != 0)
+ {
+ /* Make a copy of supplied information. */
+ pThis->guestInfo = *pInfo;
+
+ /* Check additions interface version. */
+ pThis->fu32AdditionsOk = VMMDEV_INTERFACE_VERSION_IS_OK(pThis->guestInfo.interfaceVersion);
+
+ vmmdevLogGuestOsInfo(&pThis->guestInfo);
+
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestInfo)
+ pThisCC->pDrv->pfnUpdateGuestInfo(pThisCC->pDrv, &pThis->guestInfo);
+ }
+
+ if (!pThis->fu32AdditionsOk)
+ return VERR_VERSION_MISMATCH;
+
+ /* Clear our IRQ in case it was high for whatever reason. */
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GuestHeartbeat.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ */
+static int vmmDevReqHandler_GuestHeartbeat(PPDMDEVINS pDevIns, PVMMDEV pThis)
+{
+ int rc;
+ if (pThis->fHeartbeatActive)
+ {
+ uint64_t const nsNowTS = PDMDevHlpTimerGetNano(pDevIns, pThis->hFlatlinedTimer);
+ if (!pThis->fFlatlined)
+ { /* likely */ }
+ else
+ {
+ LogRel(("VMMDev: GuestHeartBeat: Guest is alive (gone %'llu ns)\n", nsNowTS - pThis->nsLastHeartbeatTS));
+ ASMAtomicWriteBool(&pThis->fFlatlined, false);
+ }
+ ASMAtomicWriteU64(&pThis->nsLastHeartbeatTS, nsNowTS);
+
+ /* Postpone (or restart if we missed a beat) the timeout timer. */
+ rc = PDMDevHlpTimerSetNano(pDevIns, pThis->hFlatlinedTimer, pThis->cNsHeartbeatTimeout);
+ }
+ else
+ rc = VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * Timer that fires when there have been no heartbeats for a given time.
+ *
+ * @remarks Does not take the VMMDev critsect.
+ */
+static DECLCALLBACK(void) vmmDevHeartbeatFlatlinedTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
+{
+ PVMMDEV pThis = (PVMMDEV)pvUser;
+ Assert(hTimer == pThis->hFlatlinedTimer);
+ if (pThis->fHeartbeatActive)
+ {
+ uint64_t cNsElapsed = PDMDevHlpTimerGetNano(pDevIns, hTimer) - pThis->nsLastHeartbeatTS;
+ if ( !pThis->fFlatlined
+ && cNsElapsed >= pThis->cNsHeartbeatInterval)
+ {
+ LogRel(("VMMDev: vmmDevHeartbeatFlatlinedTimer: Guest seems to be unresponsive. Last heartbeat received %RU64 seconds ago\n",
+ cNsElapsed / RT_NS_1SEC));
+ ASMAtomicWriteBool(&pThis->fFlatlined, true);
+ }
+ }
+}
+
+
+/**
+ * Handles VMMDevReq_HeartbeatConfigure.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmDevReqHandler_HeartbeatConfigure(PPDMDEVINS pDevIns, PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReqHeartbeat), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VMMDevReqHeartbeat *pReq = (VMMDevReqHeartbeat *)pReqHdr;
+ int rc;
+
+ pReq->cNsInterval = pThis->cNsHeartbeatInterval;
+
+ if (pReq->fEnabled != pThis->fHeartbeatActive)
+ {
+ ASMAtomicWriteBool(&pThis->fHeartbeatActive, pReq->fEnabled);
+ if (pReq->fEnabled)
+ {
+ /*
+ * Activate the heartbeat monitor.
+ */
+ pThis->nsLastHeartbeatTS = PDMDevHlpTimerGetNano(pDevIns, pThis->hFlatlinedTimer);
+ rc = PDMDevHlpTimerSetNano(pDevIns, pThis->hFlatlinedTimer, pThis->cNsHeartbeatTimeout);
+ if (RT_SUCCESS(rc))
+ LogRel(("VMMDev: Heartbeat flatline timer set to trigger after %'RU64 ns\n", pThis->cNsHeartbeatTimeout));
+ else
+ LogRel(("VMMDev: Error starting flatline timer (heartbeat): %Rrc\n", rc));
+ }
+ else
+ {
+ /*
+ * Deactivate the heartbeat monitor.
+ */
+ rc = PDMDevHlpTimerStop(pDevIns, pThis->hFlatlinedTimer);
+ LogRel(("VMMDev: Heartbeat checking timer has been stopped (rc=%Rrc)\n", rc));
+ }
+ }
+ else
+ {
+ LogRel(("VMMDev: vmmDevReqHandler_HeartbeatConfigure: No change (fHeartbeatActive=%RTbool)\n", pThis->fHeartbeatActive));
+ rc = VINF_SUCCESS;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_NtBugCheck.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmDevReqHandler_NtBugCheck(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ if (pReqHdr->size == sizeof(VMMDevReqNtBugCheck))
+ {
+ VMMDevReqNtBugCheck const *pReq = (VMMDevReqNtBugCheck const *)pReqHdr;
+ PDMDevHlpDBGFReportBugCheck(pDevIns, DBGFEVENT_BSOD_VMMDEV,
+ pReq->uBugCheck, pReq->auParameters[0], pReq->auParameters[1],
+ pReq->auParameters[2], pReq->auParameters[3]);
+ }
+ else if (pReqHdr->size == sizeof(VMMDevRequestHeader))
+ {
+ LogRel(("VMMDev: NT BugCheck w/o data.\n"));
+ PDMDevHlpDBGFReportBugCheck(pDevIns, DBGFEVENT_BSOD_VMMDEV, 0, 0, 0, 0, 0);
+ }
+ else
+ return VERR_INVALID_PARAMETER;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates a publisher tag.
+ *
+ * @returns true / false.
+ * @param pszTag Tag to validate.
+ */
+static bool vmmdevReqIsValidPublisherTag(const char *pszTag)
+{
+ /* Note! This character set is also found in Config.kmk. */
+ static char const s_szValidChars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz()[]{}+-.,";
+
+ while (*pszTag != '\0')
+ {
+ if (!strchr(s_szValidChars, *pszTag))
+ return false;
+ pszTag++;
+ }
+ return true;
+}
+
+
+/**
+ * Validates a build tag.
+ *
+ * @returns true / false.
+ * @param pszTag Tag to validate.
+ */
+static bool vmmdevReqIsValidBuildTag(const char *pszTag)
+{
+ int cchPrefix;
+ if (!strncmp(pszTag, "RC", 2))
+ cchPrefix = 2;
+ else if (!strncmp(pszTag, "BETA", 4))
+ cchPrefix = 4;
+ else if (!strncmp(pszTag, "ALPHA", 5))
+ cchPrefix = 5;
+ else
+ return false;
+
+ if (pszTag[cchPrefix] == '\0')
+ return true;
+
+ uint8_t u8;
+ int rc = RTStrToUInt8Full(&pszTag[cchPrefix], 10, &u8);
+ return rc == VINF_SUCCESS;
+}
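+
+/* Examples (illustrative only): "BETA", "BETA2", "RC1" and "ALPHA3" pass
+ * vmmdevReqIsValidBuildTag(), while "FOO1" or "BETAx" do not; publisher tags
+ * such as "OSE" or "Debian" merely need to stick to the character set checked
+ * by vmmdevReqIsValidPublisherTag(). */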
+
+
+/**
+ * Handles VMMDevReq_ReportGuestInfo2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestInfo2(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReportGuestInfo2), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VBoxGuestInfo2 const *pInfo2 = &((VMMDevReportGuestInfo2 *)pReqHdr)->guestInfo;
+
+ LogRel(("VMMDev: Guest Additions information report: Version %d.%d.%d r%d '%.*s'\n",
+ pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild,
+ pInfo2->additionsRevision, sizeof(pInfo2->szName), pInfo2->szName));
+
+ /* The interface was introduced in 3.2 and will definitely not be
+ backported beyond 3.0 (bird). */
+ AssertMsgReturn(pInfo2->additionsMajor >= 3,
+ ("%u.%u.%u\n", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild),
+ VERR_INVALID_PARAMETER);
+
+ /* The version must fit in a full version compression. */
+ uint32_t uFullVersion = VBOX_FULL_VERSION_MAKE(pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild);
+ AssertMsgReturn( VBOX_FULL_VERSION_GET_MAJOR(uFullVersion) == pInfo2->additionsMajor
+ && VBOX_FULL_VERSION_GET_MINOR(uFullVersion) == pInfo2->additionsMinor
+ && VBOX_FULL_VERSION_GET_BUILD(uFullVersion) == pInfo2->additionsBuild,
+ ("%u.%u.%u\n", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild),
+ VERR_OUT_OF_RANGE);
+
+ /*
+ * Validate the name.
+ * Be less strict towards older additions (< v4.1.50).
+ */
+ AssertCompile(sizeof(pThis->guestInfo2.szName) == sizeof(pInfo2->szName));
+ AssertReturn(RTStrEnd(pInfo2->szName, sizeof(pInfo2->szName)) != NULL, VERR_INVALID_PARAMETER);
+ const char *pszName = pInfo2->szName;
+
+ /* Skip the leading version number, which shouldn't really be part of the name. */
+ char szTmp[sizeof(pInfo2->szName)];
+ size_t cchStart = RTStrPrintf(szTmp, sizeof(szTmp), "%u.%u.%u", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild);
+ AssertMsgReturn(!strncmp(pszName, szTmp, cchStart), ("%s != %s\n", pszName, szTmp), VERR_INVALID_PARAMETER);
+ pszName += cchStart;
+
+ /* Now we can either have nothing, or a build tag and/or a publisher tag. */
+ if (*pszName != '\0')
+ {
+ const char *pszRelaxedName = "";
+ bool const fStrict = pInfo2->additionsMajor > 4
+ || (pInfo2->additionsMajor == 4 && pInfo2->additionsMinor > 1)
+ || (pInfo2->additionsMajor == 4 && pInfo2->additionsMinor == 1 && pInfo2->additionsBuild >= 50);
+ bool fOk = false;
+ if (*pszName == '_')
+ {
+ pszName++;
+ strcpy(szTmp, pszName);
+ char *pszTag2 = strchr(szTmp, '_');
+ if (!pszTag2)
+ {
+ fOk = vmmdevReqIsValidBuildTag(szTmp)
+ || vmmdevReqIsValidPublisherTag(szTmp);
+ }
+ else
+ {
+ *pszTag2++ = '\0';
+ fOk = vmmdevReqIsValidBuildTag(szTmp);
+ if (fOk)
+ {
+ fOk = vmmdevReqIsValidPublisherTag(pszTag2);
+ if (!fOk)
+ pszRelaxedName = szTmp;
+ }
+ }
+ }
+
+ if (!fOk)
+ {
+ AssertLogRelMsgReturn(!fStrict, ("%s", pszName), VERR_INVALID_PARAMETER);
+
+ /* non-strict mode, just zap the extra stuff. */
+ LogRel(("VMMDev: ReportGuestInfo2: Ignoring unparsable version name bits: '%s' -> '%s'.\n", pszName, pszRelaxedName));
+ pszName = pszRelaxedName;
+ }
+ }
+
+ /*
+ * Save the info and tell Main or whoever is listening.
+ */
+ pThis->guestInfo2.uFullVersion = uFullVersion;
+ pThis->guestInfo2.uRevision = pInfo2->additionsRevision;
+ pThis->guestInfo2.fFeatures = pInfo2->additionsFeatures;
+ strcpy(pThis->guestInfo2.szName, pszName);
+
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestInfo2)
+ pThisCC->pDrv->pfnUpdateGuestInfo2(pThisCC->pDrv, uFullVersion, pszName, pInfo2->additionsRevision,
+ pInfo2->additionsFeatures);
+
+ /* Clear our IRQ in case it was high for whatever reason. */
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates a new facility status entry, initializing it to inactive.
+ *
+ * @returns Pointer to a facility status entry on success, NULL on failure
+ * (table full).
+ * @param pThis The VMMDev shared instance data.
+ * @param enmFacility The facility type code.
+ * @param fFixed This is set when allocating the standard entries
+ * from the constructor.
+ * @param pTimeSpecNow Optionally giving the entry timestamp to use (ctor).
+ */
+static PVMMDEVFACILITYSTATUSENTRY
+vmmdevAllocFacilityStatusEntry(PVMMDEV pThis, VBoxGuestFacilityType enmFacility, bool fFixed, PCRTTIMESPEC pTimeSpecNow)
+{
+ /* If full, expunge one inactive entry. */
+ if (pThis->cFacilityStatuses == RT_ELEMENTS(pThis->aFacilityStatuses))
+ {
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ if ( pThis->aFacilityStatuses[i].enmStatus == VBoxGuestFacilityStatus_Inactive
+ && !pThis->aFacilityStatuses[i].fFixed)
+ {
+ pThis->cFacilityStatuses--;
+ int cToMove = pThis->cFacilityStatuses - i;
+ if (cToMove)
+ memmove(&pThis->aFacilityStatuses[i], &pThis->aFacilityStatuses[i + 1],
+ cToMove * sizeof(pThis->aFacilityStatuses[i]));
+ RT_ZERO(pThis->aFacilityStatuses[pThis->cFacilityStatuses]);
+ break;
+ }
+ }
+
+ if (pThis->cFacilityStatuses == RT_ELEMENTS(pThis->aFacilityStatuses))
+ return NULL;
+ }
+
+ /* Find location in array (it's sorted). */
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ if ((uint32_t)pThis->aFacilityStatuses[i].enmFacility < (uint32_t)enmFacility)
+ break;
+ i++;
+
+ /* Move. */
+ int cToMove = pThis->cFacilityStatuses - i;
+ if (cToMove > 0)
+ memmove(&pThis->aFacilityStatuses[i + 1], &pThis->aFacilityStatuses[i],
+ cToMove * sizeof(pThis->aFacilityStatuses[i]));
+ pThis->cFacilityStatuses++;
+
+ /* Initialize. */
+ pThis->aFacilityStatuses[i].enmFacility = enmFacility;
+ pThis->aFacilityStatuses[i].enmStatus = VBoxGuestFacilityStatus_Inactive;
+ pThis->aFacilityStatuses[i].fFixed = fFixed;
+ pThis->aFacilityStatuses[i].afPadding[0] = 0;
+ pThis->aFacilityStatuses[i].afPadding[1] = 0;
+ pThis->aFacilityStatuses[i].afPadding[2] = 0;
+ pThis->aFacilityStatuses[i].fFlags = 0;
+ if (pTimeSpecNow)
+ pThis->aFacilityStatuses[i].TimeSpecTS = *pTimeSpecNow;
+ else
+ RTTimeSpecSetNano(&pThis->aFacilityStatuses[i].TimeSpecTS, 0);
+
+ return &pThis->aFacilityStatuses[i];
+}
+
+
+/**
+ * Gets a facility status entry, allocating a new one if not already present.
+ *
+ * @returns Pointer to a facility status entry on success, NULL on failure
+ * (table full).
+ * @param pThis The VMMDev shared instance data.
+ * @param enmFacility The facility type code.
+ */
+static PVMMDEVFACILITYSTATUSENTRY vmmdevGetFacilityStatusEntry(PVMMDEV pThis, VBoxGuestFacilityType enmFacility)
+{
+ /** @todo change to binary search. */
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ if (pThis->aFacilityStatuses[i].enmFacility == enmFacility)
+ return &pThis->aFacilityStatuses[i];
+ if ((uint32_t)pThis->aFacilityStatuses[i].enmFacility < (uint32_t)enmFacility)
+ break;
+ }
+ return vmmdevAllocFacilityStatusEntry(pThis, enmFacility, false /*fFixed*/, NULL);
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestStatus(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReportGuestStatus), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VBoxGuestStatus *pStatus = &((VMMDevReportGuestStatus *)pReqHdr)->guestStatus;
+ AssertMsgReturn( pStatus->facility > VBoxGuestFacilityType_Unknown
+ && pStatus->facility <= VBoxGuestFacilityType_All,
+ ("%d\n", pStatus->facility),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pStatus->status == (VBoxGuestFacilityStatus)(uint16_t)pStatus->status,
+ ("%#x (%u)\n", pStatus->status, pStatus->status),
+ VERR_OUT_OF_RANGE);
+
+ /*
+ * Do the update.
+ */
+ RTTIMESPEC Now;
+ RTTimeNow(&Now);
+ if (pStatus->facility == VBoxGuestFacilityType_All)
+ {
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ pThis->aFacilityStatuses[i].TimeSpecTS = Now;
+ pThis->aFacilityStatuses[i].enmStatus = pStatus->status;
+ pThis->aFacilityStatuses[i].fFlags = pStatus->flags;
+ }
+ }
+ else
+ {
+ PVMMDEVFACILITYSTATUSENTRY pEntry = vmmdevGetFacilityStatusEntry(pThis, pStatus->facility);
+ if (!pEntry)
+ {
+ LogRelMax(10, ("VMMDev: Facility table is full - facility=%u status=%u\n", pStatus->facility, pStatus->status));
+ return VERR_OUT_OF_RESOURCES;
+ }
+
+ pEntry->TimeSpecTS = Now;
+ pEntry->enmStatus = pStatus->status;
+ pEntry->fFlags = pStatus->flags;
+ }
+
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestStatus)
+ pThisCC->pDrv->pfnUpdateGuestStatus(pThisCC->pDrv, pStatus->facility, pStatus->status, pStatus->flags, &Now);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestUserState.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestUserState(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Validate input.
+ */
+ VMMDevReportGuestUserState *pReq = (VMMDevReportGuestUserState *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+
+ if ( pThisCC->pDrv
+ && pThisCC->pDrv->pfnUpdateGuestUserState)
+ {
+ /* Play safe. */
+ AssertReturn(pReq->header.size <= _2K, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbUser <= 256, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbDomain <= 256, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbDetails <= _1K, VERR_TOO_MUCH_DATA);
+
+ /* pbDynamic marks the beginning of the struct's dynamically
+ * allocated data area. */
+ uint8_t *pbDynamic = (uint8_t *)&pReq->status.szUser;
+ uint32_t cbLeft = pReqHdr->size - RT_UOFFSETOF(VMMDevReportGuestUserState, status.szUser);
+
+ /* The user. */
+ AssertReturn(pReq->status.cbUser > 0, VERR_INVALID_PARAMETER); /* User name is required. */
+ AssertReturn(pReq->status.cbUser <= cbLeft, VERR_INVALID_PARAMETER);
+ const char *pszUser = (const char *)pbDynamic;
+ AssertReturn(RTStrEnd(pszUser, pReq->status.cbUser), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pszUser);
+ AssertRCReturn(rc, rc);
+
+ /* Advance to the next field. */
+ pbDynamic += pReq->status.cbUser;
+ cbLeft -= pReq->status.cbUser;
+
+ /* pszDomain can be NULL. */
+ AssertReturn(pReq->status.cbDomain <= cbLeft, VERR_INVALID_PARAMETER);
+ const char *pszDomain = NULL;
+ if (pReq->status.cbDomain)
+ {
+ pszDomain = (const char *)pbDynamic;
+ AssertReturn(RTStrEnd(pszDomain, pReq->status.cbDomain), VERR_INVALID_PARAMETER);
+ rc = RTStrValidateEncoding(pszDomain);
+ AssertRCReturn(rc, rc);
+
+ /* Advance to the next field. */
+ pbDynamic += pReq->status.cbDomain;
+ cbLeft -= pReq->status.cbDomain;
+ }
+
+ /* pbDetails can be NULL. */
+ const uint8_t *pbDetails = NULL;
+ AssertReturn(pReq->status.cbDetails <= cbLeft, VERR_INVALID_PARAMETER);
+ if (pReq->status.cbDetails > 0)
+ pbDetails = pbDynamic;
+
+ pThisCC->pDrv->pfnUpdateGuestUserState(pThisCC->pDrv, pszUser, pszDomain, (uint32_t)pReq->status.state,
+ pbDetails, pReq->status.cbDetails);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestCapabilities.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestCapabilities(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqGuestCapabilities *pReq = (VMMDevReqGuestCapabilities *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* Enable VMMDEV_GUEST_SUPPORTS_GRAPHICS automatically for guests using the old
+ * request to report their capabilities.
+ */
+ const uint32_t fu32Caps = pReq->caps | VMMDEV_GUEST_SUPPORTS_GRAPHICS;
+
+ if (pThis->fGuestCaps != fu32Caps)
+ {
+ /* make a copy of supplied information */
+ pThis->fGuestCaps = fu32Caps;
+
+ LogRel(("VMMDev: Guest Additions capability report (legacy): (0x%x) seamless: %s, hostWindowMapping: %s, graphics: yes\n",
+ fu32Caps,
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING ? "yes" : "no"));
+
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestCapabilities)
+ pThisCC->pDrv->pfnUpdateGuestCapabilities(pThisCC->pDrv, fu32Caps);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetGuestCapabilities.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetGuestCapabilities(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqGuestCapabilities2 *pReq = (VMMDevReqGuestCapabilities2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ uint32_t fu32Caps = pThis->fGuestCaps;
+ fu32Caps |= pReq->u32OrMask;
+ fu32Caps &= ~pReq->u32NotMask;
+
+ LogRel(("VMMDev: Guest Additions capability report: (%#x -> %#x) seamless: %s, hostWindowMapping: %s, graphics: %s\n",
+ pThis->fGuestCaps, fu32Caps,
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS ? "yes" : "no"));
+
+ pThis->fGuestCaps = fu32Caps;
+
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestCapabilities)
+ pThisCC->pDrv->pfnUpdateGuestCapabilities(pThisCC->pDrv, fu32Caps);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetMouseStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetMouseStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMouseStatus *pReq = (VMMDevReqMouseStatus *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->mouseFeatures = pThis->fMouseCapabilities
+ & VMMDEV_MOUSE_MASK;
+ pReq->pointerXPos = pThis->xMouseAbs;
+ pReq->pointerYPos = pThis->yMouseAbs;
+ LogRel2(("VMMDev: vmmdevReqHandler_GetMouseStatus: mouseFeatures=%#x, xAbs=%d, yAbs=%d\n",
+ pReq->mouseFeatures, pReq->pointerXPos, pReq->pointerYPos));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetMouseStatusEx.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetMouseStatusEx(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMouseStatusEx *pReq = (VMMDevReqMouseStatusEx *)pReqHdr;
+ AssertMsgReturn(pReq->Core.header.size == sizeof(*pReq), ("%u\n", pReq->Core.header.size), VERR_INVALID_PARAMETER);
+
+ /* Main will convert the host mouse button state obtained from the GUI
+ * into the PDMIMOUSEPORT_BUTTON_XXX representation. The guest expects it
+ * in the VMMDEV_MOUSE_BUTTON_XXX representation. Make sure both
+ * representations are identical. */
+ AssertCompile(VMMDEV_MOUSE_BUTTON_LEFT == PDMIMOUSEPORT_BUTTON_LEFT);
+ AssertCompile(VMMDEV_MOUSE_BUTTON_RIGHT == PDMIMOUSEPORT_BUTTON_RIGHT);
+ AssertCompile(VMMDEV_MOUSE_BUTTON_MIDDLE == PDMIMOUSEPORT_BUTTON_MIDDLE);
+ AssertCompile(VMMDEV_MOUSE_BUTTON_X1 == PDMIMOUSEPORT_BUTTON_X1);
+ AssertCompile(VMMDEV_MOUSE_BUTTON_X2 == PDMIMOUSEPORT_BUTTON_X2);
+
+ pReq->Core.mouseFeatures = pThis->fMouseCapabilities & VMMDEV_MOUSE_MASK;
+ pReq->Core.pointerXPos = pThis->xMouseAbs;
+ pReq->Core.pointerYPos = pThis->yMouseAbs;
+ pReq->dz = pThis->dzMouse;
+ pReq->dw = pThis->dwMouse;
+ pReq->fButtons = pThis->fMouseButtons;
+ LogRel2(("VMMDev: vmmdevReqHandler_GetMouseStatusEx: mouseFeatures=%#x, xAbs=%d, yAbs=%d, zAbs=%d, wMouseRel=%d, fButtons=0x%x\n",
+ pReq->Core.mouseFeatures, pReq->Core.pointerXPos, pReq->Core.pointerYPos, pReq->dz, pReq->dw, pReq->fButtons));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetMouseStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetMouseStatus(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMouseStatus *pReq = (VMMDevReqMouseStatus *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: mouseFeatures=%#x\n", pReq->mouseFeatures));
+
+ bool fNotify = false;
+ if ( (pReq->mouseFeatures & VMMDEV_MOUSE_NOTIFY_HOST_MASK)
+ != ( pThis->fMouseCapabilities
+ & VMMDEV_MOUSE_NOTIFY_HOST_MASK))
+ fNotify = true;
+
+ pThis->fMouseCapabilities &= ~VMMDEV_MOUSE_GUEST_MASK;
+ pThis->fMouseCapabilities |= (pReq->mouseFeatures & VMMDEV_MOUSE_GUEST_MASK);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: New host capabilities: %#x\n", pThis->fMouseCapabilities));
+
+ /*
+ * Notify connector if something changed.
+ */
+ if (fNotify)
+ {
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: Notifying connector\n"));
+ pThisCC->pDrv->pfnUpdateMouseCapabilities(pThisCC->pDrv, pThis->fMouseCapabilities);
+ }
+
+ return VINF_SUCCESS;
+}
+
+static int vmmdevVerifyPointerShape(VMMDevReqMousePointer *pReq)
+{
+ /* Should be enough for most mouse pointers. */
+ if (pReq->width > 8192 || pReq->height > 8192)
+ return VERR_INVALID_PARAMETER;
+
+ uint32_t cbShape = (pReq->width + 7) / 8 * pReq->height; /* size of the AND mask */
+ cbShape = ((cbShape + 3) & ~3) + pReq->width * 4 * pReq->height; /* + gap + size of the XOR mask */
+ if (RT_UOFFSETOF(VMMDevReqMousePointer, pointerData) + cbShape > pReq->header.size)
+ return VERR_INVALID_PARAMETER;
+
+ return VINF_SUCCESS;
+}
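+
+/* Worked example (illustrative only) of the size check above: for a 32x32
+ * pointer the AND mask takes (32 + 7) / 8 * 32 = 128 bytes, which is already
+ * 4 byte aligned, and the XOR mask takes 32 * 4 * 32 = 4096 bytes, so the
+ * request must provide at least RT_UOFFSETOF(VMMDevReqMousePointer,
+ * pointerData) + 4224 bytes. */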
+
+/**
+ * Handles VMMDevReq_SetPointerShape.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetPointerShape(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMousePointer *pReq = (VMMDevReqMousePointer *)pReqHdr;
+ if (pReq->header.size < sizeof(*pReq))
+ {
+ AssertMsg(pReq->header.size == 0x10028 && pReq->header.version == 10000, /* don't complain about legacy!!! */
+ ("VMMDev mouse shape structure has invalid size %d (%#x) version=%d!\n",
+ pReq->header.size, pReq->header.size, pReq->header.version));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ bool fVisible = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_VISIBLE);
+ bool fAlpha = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_ALPHA);
+ bool fShape = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_SHAPE);
+
+ Log(("VMMDevReq_SetPointerShape: visible: %d, alpha: %d, shape = %d, width: %d, height: %d\n",
+ fVisible, fAlpha, fShape, pReq->width, pReq->height));
+
+ if (pReq->header.size == sizeof(VMMDevReqMousePointer))
+ {
+ /* The guest did not actually provide a shape. */
+ fShape = false;
+ }
+
+ /* forward call to driver */
+ if (fShape)
+ {
+ int rc = vmmdevVerifyPointerShape(pReq);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pThisCC->pDrv->pfnUpdatePointerShape(pThisCC->pDrv,
+ fVisible,
+ fAlpha,
+ pReq->xHot, pReq->yHot,
+ pReq->width, pReq->height,
+ pReq->pointerData);
+ }
+ else
+ {
+ pThisCC->pDrv->pfnUpdatePointerShape(pThisCC->pDrv,
+ fVisible,
+ 0,
+ 0, 0,
+ 0, 0,
+ NULL);
+ }
+
+ pThis->fHostCursorRequested = fVisible;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHostTime.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHostTime(PPDMDEVINS pDevIns, PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHostTime *pReq = (VMMDevReqHostTime *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (RT_LIKELY(!pThis->fGetHostTimeDisabled))
+ {
+ RTTIMESPEC now;
+ pReq->time = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pDevIns, &now));
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHypervisorInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHypervisorInfo(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHypervisorInfo *pReq = (VMMDevReqHypervisorInfo *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+#if 1 /* Obsolete for now, only used for raw-mode. */
+ RT_NOREF(pDevIns);
+ pReq->hypervisorSize = 0;
+ return VINF_SUCCESS;
+#else
+ return PGMR3MappingsSize(PDMDevHlpGetVM(pDevIns), &pReq->hypervisorSize);
+#endif
+}
+
+
+/**
+ * Handles VMMDevReq_SetHypervisorInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetHypervisorInfo(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHypervisorInfo *pReq = (VMMDevReqHypervisorInfo *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ int rc;
+#if 1 /* Obsolete for now, only used for raw-mode. */
+ RT_NOREF(pDevIns);
+ if (pReq->hypervisorStart == 0 || pReq->hypervisorSize == 0)
+ rc = VINF_SUCCESS;
+ else
+ rc = VERR_TRY_AGAIN;
+#else
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ if (pReq->hypervisorStart == 0)
+ rc = PGMR3MappingsUnfix(pVM);
+ else
+ {
+ /* only if the client has queried the size before! */
+ uint32_t cbMappings;
+ rc = PGMR3MappingsSize(pVM, &cbMappings);
+ if (RT_SUCCESS(rc) && pReq->hypervisorSize == cbMappings)
+ {
+ /* new reservation */
+ rc = PGMR3MappingsFix(pVM, pReq->hypervisorStart, pReq->hypervisorSize);
+ LogRel(("VMMDev: Guest reported fixed hypervisor window at 0%010x LB %#x (rc=%Rrc)\n",
+ pReq->hypervisorStart, pReq->hypervisorSize, rc));
+ }
+ else if (RT_FAILURE(rc)) /** @todo r=bird: This should've been RT_SUCCESS(rc)) */
+ rc = VERR_TRY_AGAIN;
+ }
+#endif
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_RegisterPatchMemory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_RegisterPatchMemory(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqPatchMemory *pReq = (VMMDevReqPatchMemory *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return PDMDevHlpVMMRegisterPatchMemory(pDevIns, pReq->pPatchMem, pReq->cbPatchMem);
+}
+
+
+/**
+ * Handles VMMDevReq_DeregisterPatchMemory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_DeregisterPatchMemory(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqPatchMemory *pReq = (VMMDevReqPatchMemory *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return PDMDevHlpVMMDeregisterPatchMemory(pDevIns, pReq->pPatchMem, pReq->cbPatchMem);
+}
+
+
+/**
+ * Handles VMMDevReq_SetPowerStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetPowerStatus(PPDMDEVINS pDevIns, PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPowerStateRequest *pReq = (VMMDevPowerStateRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ switch (pReq->powerState)
+ {
+ case VMMDevPowerState_Pause:
+ {
+ LogRel(("VMMDev: Guest requests the VM to be suspended (paused)\n"));
+ return PDMDevHlpVMSuspend(pDevIns);
+ }
+
+ case VMMDevPowerState_PowerOff:
+ {
+ LogRel(("VMMDev: Guest requests the VM to be turned off\n"));
+ return PDMDevHlpVMPowerOff(pDevIns);
+ }
+
+ case VMMDevPowerState_SaveState:
+ {
+ if (pThis->fAllowGuestToSaveState)
+ {
+ LogRel(("VMMDev: Guest requests the VM to be saved and powered off\n"));
+ return PDMDevHlpVMSuspendSaveAndPowerOff(pDevIns);
+ }
+ LogRel(("VMMDev: Guest requests the VM to be saved and powered off, declined\n"));
+ return VERR_ACCESS_DENIED;
+ }
+
+ default:
+ AssertMsgFailed(("VMMDev: Invalid power state request: %d\n", pReq->powerState));
+ return VERR_INVALID_PARAMETER;
+ }
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequest
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @remarks Deprecated.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequest *pReq = (VMMDevDisplayChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pDispRequest = &pThis->displayChangeData.aRequests[0];
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+ /* Remember which resolution the client has queried, subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+
+ Log(("VMMDev: returning display change request xres = %d, yres = %d, bpp = %d\n", pReq->xres, pReq->yres, pReq->bpp));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequest2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequest2(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequest2 *pReq = (VMMDevDisplayChangeRequest2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pDispRequest = NULL;
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Select a pending request to report. */
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ pDispRequest = &pThis->displayChangeData.aRequests[i];
+ /* Remember which request should be reported. */
+ pThis->displayChangeData.iCurrentMonitor = i;
+ Log3(("VMMDev: will report pending request for %u\n", i));
+ break;
+ }
+ }
+
+ /* Check if there are more pending requests. */
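+        /* If another one is still pending, the event is re-raised below so the guest queries again. */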
+ i++;
+ for (; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ Log3(("VMMDev: another pending at %u\n", i));
+ break;
+ }
+ }
+
+ if (pDispRequest)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+            /* Remember which resolution the client has queried; subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ Log3(("VMMDev: no pending request!!!\n"));
+ }
+ }
+
+ if (!pDispRequest)
+ {
+ Log3(("VMMDev: default to %d\n", pThis->displayChangeData.iCurrentMonitor));
+ pDispRequest = &pThis->displayChangeData.aRequests[pThis->displayChangeData.iCurrentMonitor];
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+ pReq->display = pDisplayDef->idDisplay;
+
+ Log(("VMMDev: returning display change request xres = %d, yres = %d, bpp = %d at %d\n",
+ pReq->xres, pReq->yres, pReq->bpp, pReq->display));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequestEx.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequestEx(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequestEx *pReq = (VMMDevDisplayChangeRequestEx *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pDispRequest = NULL;
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Select a pending request to report. */
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ pDispRequest = &pThis->displayChangeData.aRequests[i];
+ /* Remember which request should be reported. */
+ pThis->displayChangeData.iCurrentMonitor = i;
+ Log3(("VMMDev: will report pending request for %d\n",
+ i));
+ break;
+ }
+ }
+
+ /* Check if there are more pending requests. */
+ i++;
+ for (; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ Log3(("VMMDev: another pending at %d\n",
+ i));
+ break;
+ }
+ }
+
+ if (pDispRequest)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+            /* Remember which resolution the client has queried; subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ Log3(("VMMDev: no pending request!!!\n"));
+ }
+ }
+
+ if (!pDispRequest)
+ {
+ Log3(("VMMDev: default to %d\n",
+ pThis->displayChangeData.iCurrentMonitor));
+ pDispRequest = &pThis->displayChangeData.aRequests[pThis->displayChangeData.iCurrentMonitor];
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+ pReq->display = pDisplayDef->idDisplay;
+ pReq->cxOrigin = pDisplayDef->xOrigin;
+ pReq->cyOrigin = pDisplayDef->yOrigin;
+ pReq->fEnabled = !RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_DISABLED);
+ pReq->fChangeOrigin = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN);
+
+ Log(("VMMDevEx: returning display change request xres = %d, yres = %d, bpp = %d id %d xPos = %d, yPos = %d & Enabled=%d\n",
+ pReq->xres, pReq->yres, pReq->bpp, pReq->display, pReq->cxOrigin, pReq->cyOrigin, pReq->fEnabled));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequestMulti.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequestMulti(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequestMulti *pReq = (VMMDevDisplayChangeRequestMulti *)pReqHdr;
+ unsigned i;
+
+ ASSERT_GUEST_MSG_RETURN(pReq->header.size >= sizeof(*pReq),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ uint32_t const cDisplays = pReq->cDisplays;
+ ASSERT_GUEST_MSG_RETURN(cDisplays > 0 && cDisplays <= RT_ELEMENTS(pThis->displayChangeData.aRequests),
+ ("cDisplays %u\n", cDisplays), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
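+    /* The request structure already embeds one VMMDevDisplayDef; the guest appends cDisplays - 1 more. */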
+ ASSERT_GUEST_MSG_RETURN(pReq->header.size >= sizeof(*pReq) + (cDisplays - 1) * sizeof(VMMDevDisplayDef),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ uint32_t cDisplaysOut = 0;
+        /* Remember which resolution the client has queried; subsequent reads
+ * will return the same values. */
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); ++i)
+ {
+ DISPLAYCHANGEREQUEST *pDCR = &pThis->displayChangeData.aRequests[i];
+
+ pDCR->lastReadDisplayChangeRequest = pDCR->displayChangeRequest;
+
+ if (pDCR->fPending)
+ {
+ if (cDisplaysOut < cDisplays)
+ pReq->aDisplays[cDisplaysOut] = pDCR->lastReadDisplayChangeRequest;
+
+ cDisplaysOut++;
+ pDCR->fPending = false;
+ }
+ }
+
+ pReq->cDisplays = cDisplaysOut;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ /* Fill the guest request with monitor layout data. */
+ for (i = 0; i < cDisplays; ++i)
+ {
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ DISPLAYCHANGEREQUEST const *pDCR = &pThis->displayChangeData.aRequests[i];
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDCR->lastReadDisplayChangeRequest :
+ &pDCR->displayChangeRequest;
+ pReq->aDisplays[i] = *pDisplayDef;
+ }
+ }
+
+ Log(("VMMDev: returning multimonitor display change request cDisplays %d\n", cDisplays));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoModeSupported.
+ *
+ * Query whether the given video mode is supported.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoModeSupported(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoModeSupportedRequest *pReq = (VMMDevVideoModeSupportedRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThisCC->pDrv->pfnVideoModeSupported(pThisCC->pDrv,
+ 0, /* primary screen. */
+ pReq->width,
+ pReq->height,
+ pReq->bpp,
+ &pReq->fSupported);
+}
+
+
+/**
+ * Handles VMMDevReq_VideoModeSupported2.
+ *
+ * Query whether the given video mode is supported for a specific display.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoModeSupported2(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoModeSupportedRequest2 *pReq = (VMMDevVideoModeSupportedRequest2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThisCC->pDrv->pfnVideoModeSupported(pThisCC->pDrv,
+ pReq->display,
+ pReq->width,
+ pReq->height,
+ pReq->bpp,
+ &pReq->fSupported);
+}
+
+
+
+/**
+ * Handles VMMDevReq_GetHeightReduction.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHeightReduction(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetHeightReductionRequest *pReq = (VMMDevGetHeightReductionRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThisCC->pDrv->pfnGetHeightReduction(pThisCC->pDrv, &pReq->heightReduction);
+}
+
+
+/**
+ * Handles VMMDevReq_AcknowledgeEvents.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_AcknowledgeEvents(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevEvents *pReq = (VMMDevEvents *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ STAM_REL_COUNTER_INC(&pThis->StatSlowIrqAck);
+
+ if (!VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ /*
+ * Note! This code is duplicated in vmmdevFastRequestIrqAck.
+ */
+ if (pThis->fNewGuestFilterMaskValid)
+ {
+ pThis->fNewGuestFilterMaskValid = false;
+ pThis->fGuestFilterMask = pThis->fNewGuestFilterMask;
+ }
+
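+        /* Return the pending events the guest has enabled and clear them; the IRQ is lowered below. */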
+ pReq->events = pThis->fHostEventFlags & pThis->fGuestFilterMask;
+
+ pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
+ pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+ }
+ else
+ vmmdevSetIRQ_Legacy(pDevIns, pThis, pThisCC);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_CtlGuestFilterMask.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_CtlGuestFilterMask(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCtlGuestFilterMask *pReq = (VMMDevCtlGuestFilterMask *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_CtlGuestFilterMask: OR mask: %#x, NOT mask: %#x\n", pReq->u32OrMask, pReq->u32NotMask));
+
+ /* HGCM event notification is enabled by the VMMDev device
+ * automatically when any HGCM command is issued. The guest
+ * cannot disable these notifications. */
+ VMMDevCtlSetGuestFilterMask(pDevIns, pThis, pThisCC, pReq->u32OrMask, pReq->u32NotMask & ~VMMDEV_EVENT_HGCM);
+ return VINF_SUCCESS;
+}
+
+#ifdef VBOX_WITH_HGCM
+
+/**
+ * Handles VMMDevReq_HGCMConnect.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMConnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this is >= ... */
+
+ if (pThisCC->pHGCMDrv)
+ {
+ Log(("VMMDevReq_HGCMConnect\n"));
+ return vmmdevR3HgcmConnect(pDevIns, pThis, pThisCC, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_HGCMConnect: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMDisconnect.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMDisconnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThisCC->pHGCMDrv)
+ {
+ Log(("VMMDevReq_VMMDevHGCMDisconnect\n"));
+ return vmmdevR3HgcmDisconnect(pDevIns, pThis, pThisCC, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_VMMDevHGCMDisconnect: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+static int vmmdevReqHandler_HGCMCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr,
+ RTGCPHYS GCPhysReqHdr, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
+{
+ VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER);
+
+ if (pThisCC->pHGCMDrv)
+ {
+        Log2(("VMMDevReq_HGCMCall: sizeof(VMMDevHGCMCall) = %04X\n", sizeof(VMMDevHGCMCall)));
+ Log2(("%.*Rhxd\n", pReq->header.header.size, pReq));
+
+ return vmmdevR3HgcmCall(pDevIns, pThis, pThisCC, pReq, pReq->header.header.size, GCPhysReqHdr,
+ pReq->header.header.requestType, tsArrival, ppLock);
+ }
+
+ Log(("VMMDevReq_HGCMCall: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+/**
+ * Handles VMMDevReq_HGCMCancel.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMCancel(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMCancel *pReq = (VMMDevHGCMCancel *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThisCC->pHGCMDrv)
+ {
+ Log(("VMMDevReq_VMMDevHGCMCancel\n"));
+ return vmmdevR3HgcmCancel(pThisCC, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_VMMDevHGCMCancel: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMCancel2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_HGCMCancel2(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevHGCMCancel2 *pReq = (VMMDevHGCMCancel2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThisCC->pHGCMDrv)
+ {
+ Log(("VMMDevReq_HGCMCancel2\n"));
+ return vmmdevR3HgcmCancel2(pThisCC, pReq->physReqToCancel);
+ }
+
+ Log(("VMMDevReq_HGCMCancel2: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+#endif /* VBOX_WITH_HGCM */
+
+
+/**
+ * Handles VMMDevReq_VideoAccelEnable.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoAccelEnable(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoAccelEnable *pReq = (VMMDevVideoAccelEnable *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (!pThisCC->pDrv)
+ {
+ Log(("VMMDevReq_VideoAccelEnable Connector is NULL!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
+ if (pReq->cbRingBuffer != VMMDEV_VBVA_RING_BUFFER_SIZE)
+ {
+        /* The guest driver seems to be compiled with different headers. */
+ LogRelMax(16,("VMMDevReq_VideoAccelEnable guest ring buffer size %#x, should be %#x!!\n", pReq->cbRingBuffer, VMMDEV_VBVA_RING_BUFFER_SIZE));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /* The request is correct. */
+ pReq->fu32Status |= VBVA_F_STATUS_ACCEPTED;
+
+ LogFlow(("VMMDevReq_VideoAccelEnable pReq->u32Enable = %d\n", pReq->u32Enable));
+
+ int rc = pReq->u32Enable
+ ? pThisCC->pDrv->pfnVideoAccelEnable(pThisCC->pDrv, true, &pThisCC->pVMMDevRAMR3->vbvaMemory)
+ : pThisCC->pDrv->pfnVideoAccelEnable(pThisCC->pDrv, false, NULL);
+
+ if ( pReq->u32Enable
+ && RT_SUCCESS(rc))
+ {
+ pReq->fu32Status |= VBVA_F_STATUS_ENABLED;
+
+        /* Remember that the guest successfully enabled acceleration.
+         * We need to re-establish it when restoring the VM from a saved state.
+ */
+ pThis->u32VideoAccelEnabled = 1;
+ }
+ else
+ {
+ /* The acceleration was not enabled. Remember that. */
+ pThis->u32VideoAccelEnabled = 0;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoAccelFlush.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoAccelFlush(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoAccelFlush *pReq = (VMMDevVideoAccelFlush *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (!pThisCC->pDrv)
+ {
+ Log(("VMMDevReq_VideoAccelFlush: Connector is NULL!!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
+ pThisCC->pDrv->pfnVideoAccelFlush(pThisCC->pDrv);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoSetVisibleRegion.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoSetVisibleRegion(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoSetVisibleRegion *pReq = (VMMDevVideoSetVisibleRegion *)pReqHdr;
+ AssertMsgReturn(pReq->header.size + sizeof(RTRECT) >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (!pThisCC->pDrv)
+ {
+ Log(("VMMDevReq_VideoSetVisibleRegion: Connector is NULL!!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
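+    /* The request structure already contains one RTRECT, hence the '- sizeof(RTRECT)' in the size check below. */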
+ if ( pReq->cRect > _1M /* restrict to sane range */
+ || pReq->header.size != sizeof(VMMDevVideoSetVisibleRegion) + pReq->cRect * sizeof(RTRECT) - sizeof(RTRECT))
+ {
+ Log(("VMMDevReq_VideoSetVisibleRegion: cRects=%#x doesn't match size=%#x or is out of bounds\n",
+ pReq->cRect, pReq->header.size));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ Log(("VMMDevReq_VideoSetVisibleRegion %d rectangles\n", pReq->cRect));
+ /* forward the call */
+ return pThisCC->pDrv->pfnSetVisibleRegion(pThisCC->pDrv, pReq->cRect, &pReq->Rect);
+}
+
+/**
+ * Handles VMMDevReq_VideoUpdateMonitorPositions.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoUpdateMonitorPositions(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoUpdateMonitorPositions *pReq = (VMMDevVideoUpdateMonitorPositions *)pReqHdr;
+ AssertMsgReturn(pReq->header.size + sizeof(RTRECT) >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ if (!pThisCC->pDrv)
+ {
+ Log(("VMMDevReq_VideoUpdateMonitorPositions: Connector is NULL!!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
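+    /* The request structure already contains one RTPOINT, hence the '- sizeof(RTPOINT)' in the size check below. */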
+ if ( pReq->cPositions > _1M /* restrict to sane range */
+ || pReq->header.size != sizeof(VMMDevVideoUpdateMonitorPositions) + pReq->cPositions * sizeof(RTPOINT) - sizeof(RTPOINT))
+ {
+        Log(("VMMDevReq_VideoUpdateMonitorPositions: cPositions=%#x doesn't match size=%#x or is out of bounds\n",
+ pReq->cPositions, pReq->header.size));
+ return VERR_INVALID_PARAMETER;
+ }
+    Log(("VMMDevReq_VideoUpdateMonitorPositions %d positions\n", pReq->cPositions));
+ /* forward the call */
+ return pThisCC->pDrv->pfnUpdateMonitorPositions(pThisCC->pDrv, pReq->cPositions, &(pReq->aPositions[0]));
+}
+
+/**
+ * Handles VMMDevReq_GetSeamlessChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetSeamlessChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevSeamlessChangeRequest *pReq = (VMMDevSeamlessChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+ Log(("VMMDev: returning seamless change request mode=%d\n", pThis->fSeamlessEnabled));
+ if (pThis->fSeamlessEnabled)
+ pReq->mode = VMMDev_Seamless_Visible_Region;
+ else
+ pReq->mode = VMMDev_Seamless_Disabled;
+
+ if (pReq->eventAck == VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
+ {
+ /* Remember which mode the client has queried. */
+ pThis->fLastSeamlessEnabled = pThis->fSeamlessEnabled;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetVRDPChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetVRDPChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVRDPChangeRequest *pReq = (VMMDevVRDPChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+ Log(("VMMDev: returning VRDP status %d level %d\n", pThis->fVRDPEnabled, pThis->uVRDPExperienceLevel));
+
+ pReq->u8VRDPActive = pThis->fVRDPEnabled;
+ pReq->u32VRDPExperienceLevel = pThis->uVRDPExperienceLevel;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetMemBalloonChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetMemBalloonChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetMemBalloonChangeRequest *pReq = (VMMDevGetMemBalloonChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+    Log(("VMMDev: returning memory balloon size = %d\n", pThis->cMbMemoryBalloon));
+ pReq->cBalloonChunks = pThis->cMbMemoryBalloon;
+ pReq->cPhysMemChunks = pThis->cbGuestRAM / (uint64_t)_1M;
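+    /* Balloon chunks are 1 MiB each, so the chunk counts above equal sizes in megabytes. */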
+
+ if (pReq->eventAck == VMMDEV_EVENT_BALLOON_CHANGE_REQUEST)
+ {
+        /* Remember which balloon size the client has queried. */
+ pThis->cMbMemoryBalloonLast = pThis->cMbMemoryBalloon;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ChangeMemBalloon.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ChangeMemBalloon(PPDMDEVINS pDevIns, PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevChangeMemBalloon *pReq = (VMMDevChangeMemBalloon *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->cPages == VMMDEV_MEMORY_BALLOON_CHUNK_PAGES, ("%u\n", pReq->cPages), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->header.size == (uint32_t)RT_UOFFSETOF_DYN(VMMDevChangeMemBalloon, aPhysPage[pReq->cPages]),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_ChangeMemBalloon\n"));
+ int rc = PDMDevHlpPhysChangeMemBalloon(pDevIns, !!pReq->fInflate, pReq->cPages, pReq->aPhysPage);
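+    /* Keep the release statistics counter of ballooned chunks in sync with the direction of the change. */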
+ if (pReq->fInflate)
+ STAM_REL_U32_INC(&pThis->StatMemBalloonChunks);
+ else
+ STAM_REL_U32_DEC(&pThis->StatMemBalloonChunks);
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_GetStatisticsChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetStatisticsChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetStatisticsChangeRequest *pReq = (VMMDevGetStatisticsChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_GetStatisticsChangeRequest\n"));
+ /* just pass on the information */
+ Log(("VMMDev: returning statistics interval %d seconds\n", pThis->cSecsStatInterval));
+ pReq->u32StatInterval = pThis->cSecsStatInterval;
+
+ if (pReq->eventAck == VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST)
+ {
+        /* Remember which interval the client has queried. */
+ pThis->cSecsLastStatInterval = pThis->cSecsStatInterval;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestStats.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestStats(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReportGuestStats *pReq = (VMMDevReportGuestStats *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_ReportGuestStats\n"));
+#ifdef LOG_ENABLED
+ VBoxGuestStatistics *pGuestStats = &pReq->guestStats;
+
+ Log(("Current statistics:\n"));
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_IDLE)
+ Log(("CPU%u: CPU Load Idle %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_Idle));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_KERNEL)
+ Log(("CPU%u: CPU Load Kernel %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_Kernel));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_USER)
+ Log(("CPU%u: CPU Load User %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_User));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_THREADS)
+ Log(("CPU%u: Thread %d\n", pGuestStats->u32CpuId, pGuestStats->u32Threads));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PROCESSES)
+ Log(("CPU%u: Processes %d\n", pGuestStats->u32CpuId, pGuestStats->u32Processes));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_HANDLES)
+ Log(("CPU%u: Handles %d\n", pGuestStats->u32CpuId, pGuestStats->u32Handles));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEMORY_LOAD)
+ Log(("CPU%u: Memory Load %d%%\n", pGuestStats->u32CpuId, pGuestStats->u32MemoryLoad));
+
+ /* Note that reported values are in pages; upper layers expect them in megabytes */
+ Log(("CPU%u: Page size %-4d bytes\n", pGuestStats->u32CpuId, pGuestStats->u32PageSize));
+ Assert(pGuestStats->u32PageSize == 4096);
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_TOTAL)
+ Log(("CPU%u: Total physical memory %-4d MB\n", pGuestStats->u32CpuId, (pGuestStats->u32PhysMemTotal + (_1M/_4K)-1) / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_AVAIL)
+ Log(("CPU%u: Free physical memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PhysMemAvail / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_BALLOON)
+ Log(("CPU%u: Memory balloon size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PhysMemBalloon / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_COMMIT_TOTAL)
+ Log(("CPU%u: Committed memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemCommitTotal / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_TOTAL)
+ Log(("CPU%u: Total kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelTotal / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_PAGED)
+ Log(("CPU%u: Paged kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelPaged / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED)
+ Log(("CPU%u: Nonpaged kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelNonPaged / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_SYSTEM_CACHE)
+ Log(("CPU%u: System cache size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemSystemCache / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PAGE_FILE_SIZE)
+ Log(("CPU%u: Page file size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PageFileSize / (_1M/_4K)));
+ Log(("Statistics end *******************\n"));
+#endif /* LOG_ENABLED */
+
+ /* forward the call */
+ return pThisCC->pDrv->pfnReportStatistics(pThisCC->pDrv, &pReq->guestStats);
+}
+
+
+/**
+ * Handles VMMDevReq_QueryCredentials.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_QueryCredentials(PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCredentials *pReq = (VMMDevCredentials *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ VMMDEVCREDS *pCredentials = pThisCC->pCredentials;
+ AssertPtrReturn(pCredentials, VERR_NOT_SUPPORTED);
+
+ /* let's start by nulling out the data */
+ RT_ZERO(pReq->szUserName);
+ RT_ZERO(pReq->szPassword);
+ RT_ZERO(pReq->szDomain);
+
+ /* should we return whether we got credentials for a logon? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_QUERYPRESENCE)
+ {
+ if ( pCredentials->Logon.szUserName[0]
+ || pCredentials->Logon.szPassword[0]
+ || pCredentials->Logon.szDomain[0])
+ pReq->u32Flags |= VMMDEV_CREDENTIALS_PRESENT;
+ else
+ pReq->u32Flags &= ~VMMDEV_CREDENTIALS_PRESENT;
+ }
+
+ /* does the guest want to read logon credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_READ)
+ {
+ if (pCredentials->Logon.szUserName[0])
+ RTStrCopy(pReq->szUserName, sizeof(pReq->szUserName), pCredentials->Logon.szUserName);
+ if (pCredentials->Logon.szPassword[0])
+ RTStrCopy(pReq->szPassword, sizeof(pReq->szPassword), pCredentials->Logon.szPassword);
+ if (pCredentials->Logon.szDomain[0])
+ RTStrCopy(pReq->szDomain, sizeof(pReq->szDomain), pCredentials->Logon.szDomain);
+ if (!pCredentials->Logon.fAllowInteractiveLogon)
+ pReq->u32Flags |= VMMDEV_CREDENTIALS_NOLOCALLOGON;
+ else
+ pReq->u32Flags &= ~VMMDEV_CREDENTIALS_NOLOCALLOGON;
+ }
+
+ if (!pThis->fKeepCredentials)
+ {
+ /* does the caller want us to destroy the logon credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_CLEAR)
+ {
+ RT_ZERO(pCredentials->Logon.szUserName);
+ RT_ZERO(pCredentials->Logon.szPassword);
+ RT_ZERO(pCredentials->Logon.szDomain);
+ }
+ }
+
+ /* does the guest want to read credentials for verification? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_READJUDGE)
+ {
+ if (pCredentials->Judge.szUserName[0])
+ RTStrCopy(pReq->szUserName, sizeof(pReq->szUserName), pCredentials->Judge.szUserName);
+ if (pCredentials->Judge.szPassword[0])
+ RTStrCopy(pReq->szPassword, sizeof(pReq->szPassword), pCredentials->Judge.szPassword);
+ if (pCredentials->Judge.szDomain[0])
+ RTStrCopy(pReq->szDomain, sizeof(pReq->szDomain), pCredentials->Judge.szDomain);
+ }
+
+ /* does the caller want us to destroy the judgement credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_CLEARJUDGE)
+ {
+ RT_ZERO(pCredentials->Judge.szUserName);
+ RT_ZERO(pCredentials->Judge.szPassword);
+ RT_ZERO(pCredentials->Judge.szDomain);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportCredentialsJudgement.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportCredentialsJudgement(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCredentials *pReq = (VMMDevCredentials *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* what does the guest think about the credentials? (note: the order is important here!) */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_DENY)
+ pThisCC->pDrv->pfnSetCredentialsJudgementResult(pThisCC->pDrv, VMMDEV_CREDENTIALS_JUDGE_DENY);
+ else if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT)
+ pThisCC->pDrv->pfnSetCredentialsJudgementResult(pThisCC->pDrv, VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT);
+ else if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_OK)
+ pThisCC->pDrv->pfnSetCredentialsJudgementResult(pThisCC->pDrv, VMMDEV_CREDENTIALS_JUDGE_OK);
+ else
+ {
+ Log(("VMMDevReq_ReportCredentialsJudgement: invalid flags: %d!!!\n", pReq->u32Flags));
+ /** @todo why don't we return VERR_INVALID_PARAMETER to the guest? */
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHostVersion.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pReqHdr The header of the request to handle.
+ * @since 3.1.0
+ * @note The ring-0 VBoxGuestLib uses this to check whether
+ * VMMDevHGCMParmType_PageList is supported.
+ */
+static int vmmdevReqHandler_GetHostVersion(VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHostVersion *pReq = (VMMDevReqHostVersion *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->major = RTBldCfgVersionMajor();
+ pReq->minor = RTBldCfgVersionMinor();
+ pReq->build = RTBldCfgVersionBuild();
+ pReq->revision = RTBldCfgRevision();
+ pReq->features = VMMDEV_HVF_HGCM_PHYS_PAGE_LIST
+ | VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ | VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST
+ | VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST
+ | VMMDEV_HVF_FAST_IRQ_ACK;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetCpuHotPlugRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetCpuHotPlugRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetCpuHotPlugRequest *pReq = (VMMDevGetCpuHotPlugRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->enmEventType = pThis->enmCpuHotPlugEvent;
+ pReq->idCpuCore = pThis->idCpuCore;
+ pReq->idCpuPackage = pThis->idCpuPackage;
+
+ /* Clear the event */
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_None;
+ pThis->idCpuCore = UINT32_MAX;
+ pThis->idCpuPackage = UINT32_MAX;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetCpuHotPlugStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetCpuHotPlugStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCpuHotPlugStatusRequest *pReq = (VMMDevCpuHotPlugStatusRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (pReq->enmStatusType == VMMDevCpuStatusType_Disable)
+ pThis->fCpuHotPlugEventsEnabled = false;
+ else if (pReq->enmStatusType == VMMDevCpuStatusType_Enable)
+ pThis->fCpuHotPlugEventsEnabled = true;
+ else
+ return VERR_INVALID_PARAMETER;
+ return VINF_SUCCESS;
+}
+
+
+#ifdef DEBUG
+/**
+ * Handles VMMDevReq_LogString.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_LogString(VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqLogString *pReq = (VMMDevReqLogString *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->szString[pReq->header.size - RT_UOFFSETOF(VMMDevReqLogString, szString) - 1] == '\0',
+ ("not null terminated\n"), VERR_INVALID_PARAMETER);
+
+ LogIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR, ("DEBUG LOG: %s", pReq->szString));
+ return VINF_SUCCESS;
+}
+#endif /* DEBUG */
+
+/**
+ * Handles VMMDevReq_GetSessionId.
+ *
+ * Get a unique "session" ID for this VM, where the ID will be different after each
+ * start, reset or restore of the VM. This can be used for restore detection
+ * inside the guest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetSessionId(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqSessionId *pReq = (VMMDevReqSessionId *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->idSession = pThis->idSession;
+ return VINF_SUCCESS;
+}
+
+
+#ifdef VBOX_WITH_PAGE_SHARING
+
+/**
+ * Handles VMMDevReq_RegisterSharedModule.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_RegisterSharedModule(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Basic input validation (more done by GMM).
+ */
+ VMMDevSharedModuleRegistrationRequest *pReq = (VMMDevSharedModuleRegistrationRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(VMMDevSharedModuleRegistrationRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->header.size == RT_UOFFSETOF_DYN(VMMDevSharedModuleRegistrationRequest, aRegions[pReq->cRegions]),
+ ("%u cRegions=%u\n", pReq->header.size, pReq->cRegions), VERR_INVALID_PARAMETER);
+
+ AssertReturn(RTStrEnd(pReq->szName, sizeof(pReq->szName)), VERR_INVALID_PARAMETER);
+ AssertReturn(RTStrEnd(pReq->szVersion, sizeof(pReq->szVersion)), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pReq->szName);
+ AssertRCReturn(rc, rc);
+ rc = RTStrValidateEncoding(pReq->szVersion);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Forward the request to the VMM.
+ */
+ return PDMDevHlpSharedModuleRegister(pDevIns, pReq->enmGuestOS, pReq->szName, pReq->szVersion,
+ pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
+}
+
+/**
+ * Handles VMMDevReq_UnregisterSharedModule.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_UnregisterSharedModule(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Basic input validation.
+ */
+ VMMDevSharedModuleUnregistrationRequest *pReq = (VMMDevSharedModuleUnregistrationRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevSharedModuleUnregistrationRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ AssertReturn(RTStrEnd(pReq->szName, sizeof(pReq->szName)), VERR_INVALID_PARAMETER);
+ AssertReturn(RTStrEnd(pReq->szVersion, sizeof(pReq->szVersion)), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pReq->szName);
+ AssertRCReturn(rc, rc);
+ rc = RTStrValidateEncoding(pReq->szVersion);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Forward the request to the VMM.
+ */
+ return PDMDevHlpSharedModuleUnregister(pDevIns, pReq->szName, pReq->szVersion,
+ pReq->GCBaseAddr, pReq->cbModule);
+}
+
+/**
+ * Handles VMMDevReq_CheckSharedModules.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_CheckSharedModules(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevSharedModuleCheckRequest *pReq = (VMMDevSharedModuleCheckRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevSharedModuleCheckRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ return PDMDevHlpSharedModuleCheckAll(pDevIns);
+}
+
+/**
+ * Handles VMMDevReq_GetPageSharingStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetPageSharingStatus(PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPageSharingStatusRequest *pReq = (VMMDevPageSharingStatusRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevPageSharingStatusRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->fEnabled = false;
+ int rc = pThisCC->pDrv->pfnIsPageFusionEnabled(pThisCC->pDrv, &pReq->fEnabled);
+ if (RT_FAILURE(rc))
+ pReq->fEnabled = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_DebugIsPageShared.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_DebugIsPageShared(PPDMDEVINS pDevIns, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPageIsSharedRequest *pReq = (VMMDevPageIsSharedRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevPageIsSharedRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return PDMDevHlpSharedModuleGetPageState(pDevIns, pReq->GCPtrPage, &pReq->fShared, &pReq->uPageFlags);
+}
+
+#endif /* VBOX_WITH_PAGE_SHARING */
+
+
+/**
+ * Handles VMMDevReq_WriteCoreDump.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pReqHdr Pointer to the request header.
+ */
+static int vmmdevReqHandler_WriteCoreDump(PPDMDEVINS pDevIns, PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqWriteCoreDump *pReq = (VMMDevReqWriteCoreDump *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevReqWriteCoreDump), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /*
+ * Only available if explicitly enabled by the user.
+ */
+ if (!pThis->fGuestCoreDumpEnabled)
+ return VERR_ACCESS_DENIED;
+
+ /*
+     * The user is responsible for making sure the directory exists before we compose the path.
+ */
+ if (!RTDirExists(pThis->szGuestCoreDumpDir))
+ return VERR_PATH_NOT_FOUND;
+
+ char szCorePath[RTPATH_MAX];
+ RTStrCopy(szCorePath, sizeof(szCorePath), pThis->szGuestCoreDumpDir);
+ RTPathAppend(szCorePath, sizeof(szCorePath), "VBox.core");
+
+ /*
+     * Rotate existing core files based on the number of additional cores to keep around.
+ */
+ if (pThis->cGuestCoreDumps > 0)
+ for (int64_t i = pThis->cGuestCoreDumps - 1; i >= 0; i--)
+ {
+ char szFilePathOld[RTPATH_MAX];
+ if (i == 0)
+ RTStrCopy(szFilePathOld, sizeof(szFilePathOld), szCorePath);
+ else
+ RTStrPrintf(szFilePathOld, sizeof(szFilePathOld), "%s.%lld", szCorePath, i);
+
+ char szFilePathNew[RTPATH_MAX];
+ RTStrPrintf(szFilePathNew, sizeof(szFilePathNew), "%s.%lld", szCorePath, i + 1);
+ int vrc = RTFileMove(szFilePathOld, szFilePathNew, RTFILEMOVE_FLAGS_REPLACE);
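+            /* If there was no older core file to move, make sure the destination slot is empty instead. */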
+ if (vrc == VERR_FILE_NOT_FOUND)
+ RTFileDelete(szFilePathNew);
+ }
+
+ /*
+ * Write the core file.
+ */
+ return PDMDevHlpDBGFCoreWrite(pDevIns, szCorePath, true /*fReplaceFile*/);
+}
+
+
+/**
+ * Sets the request status to VINF_HGCM_ASYNC_EXECUTE.
+ *
+ * @param pDevIns The device instance.
+ * @param GCPhysReqHdr The guest physical address of the request.
+ * @param pLock Pointer to the request locking info. NULL if not
+ * locked.
+ */
+DECLINLINE(void) vmmdevReqHdrSetHgcmAsyncExecute(PPDMDEVINS pDevIns, RTGCPHYS GCPhysReqHdr, PVMMDEVREQLOCK pLock)
+{
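+    /* Write the status directly through the locked mapping when available, otherwise use a guest-physical write. */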
+ if (pLock)
+ ((VMMDevRequestHeader volatile *)pLock->pvReq)->rc = VINF_HGCM_ASYNC_EXECUTE;
+ else
+ {
+ int32_t rcReq = VINF_HGCM_ASYNC_EXECUTE;
+ PDMDevHlpPhysWrite(pDevIns, GCPhysReqHdr + RT_UOFFSETOF(VMMDevRequestHeader, rc), &rcReq, sizeof(rcReq));
+ }
+}
+
+
+/** @name VMMDEVREQDISP_POST_F_XXX - post dispatcher optimizations.
+ * @{ */
+#define VMMDEVREQDISP_POST_F_NO_WRITE_OUT RT_BIT_32(0)
+/** @} */
+
+
+/**
+ * Dispatch the request to the appropriate handler function.
+ *
+ * @returns Port I/O handler exit code.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pReqHdr The request header (cached in host memory).
+ * @param GCPhysReqHdr The guest physical address of the request (for
+ * HGCM).
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param pfPostOptimize HGCM optimizations, VMMDEVREQDISP_POST_F_XXX.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+static VBOXSTRICTRC vmmdevReqDispatcher(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, VMMDevRequestHeader *pReqHdr,
+ RTGCPHYS GCPhysReqHdr, uint64_t tsArrival, uint32_t *pfPostOptimize,
+ PVMMDEVREQLOCK *ppLock)
+{
+ int rcRet = VINF_SUCCESS;
+ Assert(*pfPostOptimize == 0);
+ switch (pReqHdr->requestType)
+ {
+ case VMMDevReq_ReportGuestInfo:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestInfo(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestInfo2:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestInfo2(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestStatus:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestStatus(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestUserState:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestUserState(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestCapabilities:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestCapabilities(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_SetGuestCapabilities:
+ pReqHdr->rc = vmmdevReqHandler_SetGuestCapabilities(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_WriteCoreDump:
+ pReqHdr->rc = vmmdevReqHandler_WriteCoreDump(pDevIns, pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetMouseStatus:
+ pReqHdr->rc = vmmdevReqHandler_GetMouseStatus(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetMouseStatusEx:
+ pReqHdr->rc = vmmdevReqHandler_GetMouseStatusEx(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetMouseStatus:
+ pReqHdr->rc = vmmdevReqHandler_SetMouseStatus(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_SetPointerShape:
+ pReqHdr->rc = vmmdevReqHandler_SetPointerShape(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHostTime:
+ pReqHdr->rc = vmmdevReqHandler_GetHostTime(pDevIns, pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHypervisorInfo:
+ pReqHdr->rc = vmmdevReqHandler_GetHypervisorInfo(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_SetHypervisorInfo:
+ pReqHdr->rc = vmmdevReqHandler_SetHypervisorInfo(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_RegisterPatchMemory:
+ pReqHdr->rc = vmmdevReqHandler_RegisterPatchMemory(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_DeregisterPatchMemory:
+ pReqHdr->rc = vmmdevReqHandler_DeregisterPatchMemory(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_SetPowerStatus:
+ {
+ int rc = pReqHdr->rc = vmmdevReqHandler_SetPowerStatus(pDevIns, pThis, pReqHdr);
+ if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
+ rcRet = rc;
+ break;
+ }
+
+ case VMMDevReq_GetDisplayChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequest2:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequest2(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequestEx:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequestEx(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequestMulti:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequestMulti(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoModeSupported:
+ pReqHdr->rc = vmmdevReqHandler_VideoModeSupported(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoModeSupported2:
+ pReqHdr->rc = vmmdevReqHandler_VideoModeSupported2(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHeightReduction:
+ pReqHdr->rc = vmmdevReqHandler_GetHeightReduction(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_AcknowledgeEvents:
+ pReqHdr->rc = vmmdevReqHandler_AcknowledgeEvents(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_CtlGuestFilterMask:
+ pReqHdr->rc = vmmdevReqHandler_CtlGuestFilterMask(pDevIns, pThis, pThisCC, pReqHdr);
+ break;
+
+#ifdef VBOX_WITH_HGCM
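+        /* HGCM requests are marked VINF_HGCM_ASYNC_EXECUTE up front; on success the header is
+           written back later by the HGCM completion code, hence the no-write-out optimization. */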
+ case VMMDevReq_HGCMConnect:
+ vmmdevReqHdrSetHgcmAsyncExecute(pDevIns, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMConnect(pDevIns, pThis, pThisCC, pReqHdr, GCPhysReqHdr);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+ case VMMDevReq_HGCMDisconnect:
+ vmmdevReqHdrSetHgcmAsyncExecute(pDevIns, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMDisconnect(pDevIns, pThis, pThisCC, pReqHdr, GCPhysReqHdr);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+# ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+# endif
+ case VMMDevReq_HGCMCall32:
+ vmmdevReqHdrSetHgcmAsyncExecute(pDevIns, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMCall(pDevIns, pThis, pThisCC, pReqHdr, GCPhysReqHdr, tsArrival, ppLock);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+ case VMMDevReq_HGCMCancel:
+ pReqHdr->rc = vmmdevReqHandler_HGCMCancel(pThisCC, pReqHdr, GCPhysReqHdr);
+ break;
+
+ case VMMDevReq_HGCMCancel2:
+ pReqHdr->rc = vmmdevReqHandler_HGCMCancel2(pThisCC, pReqHdr);
+ break;
+#endif /* VBOX_WITH_HGCM */
+
+ case VMMDevReq_VideoAccelEnable:
+ pReqHdr->rc = vmmdevReqHandler_VideoAccelEnable(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoAccelFlush:
+ pReqHdr->rc = vmmdevReqHandler_VideoAccelFlush(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoSetVisibleRegion:
+ pReqHdr->rc = vmmdevReqHandler_VideoSetVisibleRegion(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoUpdateMonitorPositions:
+ pReqHdr->rc = vmmdevReqHandler_VideoUpdateMonitorPositions(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetSeamlessChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetSeamlessChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetVRDPChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetVRDPChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetMemBalloonChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetMemBalloonChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ChangeMemBalloon:
+ pReqHdr->rc = vmmdevReqHandler_ChangeMemBalloon(pDevIns, pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetStatisticsChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetStatisticsChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestStats:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestStats(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_QueryCredentials:
+ pReqHdr->rc = vmmdevReqHandler_QueryCredentials(pThis, pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportCredentialsJudgement:
+ pReqHdr->rc = vmmdevReqHandler_ReportCredentialsJudgement(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHostVersion:
+ pReqHdr->rc = vmmdevReqHandler_GetHostVersion(pReqHdr);
+ break;
+
+ case VMMDevReq_GetCpuHotPlugRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetCpuHotPlugRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetCpuHotPlugStatus:
+ pReqHdr->rc = vmmdevReqHandler_SetCpuHotPlugStatus(pThis, pReqHdr);
+ break;
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ case VMMDevReq_RegisterSharedModule:
+ pReqHdr->rc = vmmdevReqHandler_RegisterSharedModule(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_UnregisterSharedModule:
+ pReqHdr->rc = vmmdevReqHandler_UnregisterSharedModule(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_CheckSharedModules:
+ pReqHdr->rc = vmmdevReqHandler_CheckSharedModules(pDevIns, pReqHdr);
+ break;
+
+ case VMMDevReq_GetPageSharingStatus:
+ pReqHdr->rc = vmmdevReqHandler_GetPageSharingStatus(pThisCC, pReqHdr);
+ break;
+
+ case VMMDevReq_DebugIsPageShared:
+ pReqHdr->rc = vmmdevReqHandler_DebugIsPageShared(pDevIns, pReqHdr);
+ break;
+
+#endif /* VBOX_WITH_PAGE_SHARING */
+
+#ifdef DEBUG
+ case VMMDevReq_LogString:
+ pReqHdr->rc = vmmdevReqHandler_LogString(pReqHdr);
+ break;
+#endif
+
+ case VMMDevReq_GetSessionId:
+ pReqHdr->rc = vmmdevReqHandler_GetSessionId(pThis, pReqHdr);
+ break;
+
+ /*
+ * Guest wants to give up a timeslice.
+ * Note! This was only ever used by experimental GAs!
+ */
+ /** @todo maybe we could just remove this? */
+ case VMMDevReq_Idle:
+ {
+ /* just return to EMT telling it that we want to halt */
+ rcRet = VINF_EM_HALT;
+ break;
+ }
+
+ case VMMDevReq_GuestHeartbeat:
+ pReqHdr->rc = vmmDevReqHandler_GuestHeartbeat(pDevIns, pThis);
+ break;
+
+ case VMMDevReq_HeartbeatConfigure:
+ pReqHdr->rc = vmmDevReqHandler_HeartbeatConfigure(pDevIns, pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_NtBugCheck:
+ pReqHdr->rc = vmmDevReqHandler_NtBugCheck(pDevIns, pReqHdr);
+ break;
+
+ default:
+ {
+ pReqHdr->rc = VERR_NOT_IMPLEMENTED;
+ Log(("VMMDev unknown request type %d\n", pReqHdr->requestType));
+ break;
+ }
+ }
+ return rcRet;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT,
+ *      Port I/O write handler for the generic request interface.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+ uint64_t tsArrival;
+ STAM_GET_TS(tsArrival);
+
+ RT_NOREF(offPort, cb, pvUser);
+
+ /*
+ * The caller has passed the guest context physical address of the request
+ * structure. We'll copy all of it into a heap buffer eventually, but we
+ * will have to start off with the header.
+ */
+ VMMDevRequestHeader requestHeader;
+ RT_ZERO(requestHeader);
+ PDMDevHlpPhysRead(pDevIns, (RTGCPHYS)u32, &requestHeader, sizeof(requestHeader));
+
+    /* The structure size must be greater than or equal to the header size. */
+ if (requestHeader.size < sizeof(VMMDevRequestHeader))
+ {
+ Log(("VMMDev request header size too small! size = %d\n", requestHeader.size));
+ return VINF_SUCCESS;
+ }
+
+ /* Check the version of the header structure. */
+ if (requestHeader.version != VMMDEV_REQUEST_HEADER_VERSION)
+ {
+ Log(("VMMDev: guest header version (0x%08X) differs from ours (0x%08X)\n", requestHeader.version, VMMDEV_REQUEST_HEADER_VERSION));
+ return VINF_SUCCESS;
+ }
+
+ Log2(("VMMDev request issued: %d\n", requestHeader.requestType));
+
+ VBOXSTRICTRC rcRet = VINF_SUCCESS;
+    /* Check that it doesn't exceed the max packet size. */
+ if (requestHeader.size <= VMMDEV_MAX_VMMDEVREQ_SIZE)
+ {
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+
+ /*
+         * We require the GAs to report their information before we let them have
+         * access to all the functions.  The VMMDevReq_ReportGuestInfo request
+         * is the one which unlocks the access.  Newer additions will first
+         * issue VMMDevReq_ReportGuestInfo2; older ones don't know about it.
+ * Two exceptions: VMMDevReq_GetHostVersion and VMMDevReq_WriteCoreDump.
+ */
+ if ( pThis->fu32AdditionsOk
+ || requestHeader.requestType == VMMDevReq_ReportGuestInfo2
+ || requestHeader.requestType == VMMDevReq_ReportGuestInfo
+ || requestHeader.requestType == VMMDevReq_WriteCoreDump
+ || requestHeader.requestType == VMMDevReq_GetHostVersion
+ )
+ {
+ /*
+ * The request looks fine. Copy it into a buffer.
+ *
+ * The buffer is only used while on this thread, and this thread is one
+ * of the EMTs, so we keep a 4KB buffer for each EMT around to avoid
+             * wasting time with the heap.  Larger allocations go to the heap, though.
+ */
+ VMCPUID iCpu = PDMDevHlpGetCurrentCpuId(pDevIns);
+ VMMDevRequestHeader *pRequestHeaderFree = NULL;
+ VMMDevRequestHeader *pRequestHeader = NULL;
+ if ( requestHeader.size <= _4K
+ && iCpu < RT_ELEMENTS(pThisCC->apReqBufs))
+ {
+ pRequestHeader = pThisCC->apReqBufs[iCpu];
+ if (pRequestHeader)
+ { /* likely */ }
+ else
+ pThisCC->apReqBufs[iCpu] = pRequestHeader = (VMMDevRequestHeader *)RTMemPageAlloc(_4K);
+ }
+ else
+ {
+ Assert(iCpu != NIL_VMCPUID);
+ STAM_REL_COUNTER_INC(&pThisCC->StatReqBufAllocs);
+ pRequestHeaderFree = pRequestHeader = (VMMDevRequestHeader *)RTMemAlloc(RT_MAX(requestHeader.size, 512));
+ }
+ if (pRequestHeader)
+ {
+ memcpy(pRequestHeader, &requestHeader, sizeof(VMMDevRequestHeader));
+
+                /* Try to lock the request if it's an HGCM call that doesn't cross a page
+                   boundary.  Saves on PGM interaction. */
+ VMMDEVREQLOCK Lock = { NULL, { 0, NULL } };
+ PVMMDEVREQLOCK pLock = NULL;
+ size_t cbLeft = requestHeader.size - sizeof(VMMDevRequestHeader);
+ if (cbLeft)
+ {
+ if ( ( requestHeader.requestType == VMMDevReq_HGCMCall32
+ || requestHeader.requestType == VMMDevReq_HGCMCall64)
+ && ((u32 + requestHeader.size) >> X86_PAGE_SHIFT) == (u32 >> X86_PAGE_SHIFT)
+ && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, u32, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
+ {
+ memcpy((uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+ (uint8_t *)Lock.pvReq + sizeof(VMMDevRequestHeader), cbLeft);
+ pLock = &Lock;
+ }
+ else
+ PDMDevHlpPhysRead(pDevIns,
+ (RTGCPHYS)u32 + sizeof(VMMDevRequestHeader),
+ (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+ cbLeft);
+ }
+
+ /*
+ * Feed buffered request thru the dispatcher.
+ */
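+                /* The dispatcher can set VMMDEVREQDISP_POST_F_NO_WRITE_OUT in fPostOptimize
+                   to suppress writing the result back to guest memory below. */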
+ uint32_t fPostOptimize = 0;
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
+
+ rcRet = vmmdevReqDispatcher(pDevIns, pThis, pThisCC, pRequestHeader, u32, tsArrival, &fPostOptimize, &pLock);
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+
+ /*
+ * Write the result back to guest memory (unless it is a locked HGCM call).
+ */
+ if (!(fPostOptimize & VMMDEVREQDISP_POST_F_NO_WRITE_OUT))
+ {
+ if (pLock)
+ memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
+ else
+ PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
+ }
+
+ if (!pRequestHeaderFree)
+ { /* likely */ }
+ else
+ RTMemFreeZ(pRequestHeaderFree, RT_MAX(requestHeader.size, 512));
+ return rcRet;
+ }
+
+ Log(("VMMDev: RTMemAlloc failed!\n"));
+ requestHeader.rc = VERR_NO_MEMORY;
+ }
+ else
+ {
+ LogRelMax(10, ("VMMDev: Guest has not yet reported to us -- refusing operation of request #%d\n",
+ requestHeader.requestType));
+ requestHeader.rc = VERR_NOT_SUPPORTED;
+ }
+ }
+ else
+ {
+ LogRelMax(50, ("VMMDev: Request packet too big (%x), refusing operation\n", requestHeader.size));
+ requestHeader.rc = VERR_NOT_SUPPORTED;
+ }
+
+    /*
+     * Write the (error) result back to guest memory -- only the header is
+     * written on these paths.
+     */
+ PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
+
+ return rcRet;
+}
+
+#endif /* IN_RING3 */
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT, Port I/O write handler for requests
+ * that can be handled w/o going to ring-3.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+#ifndef IN_RING3
+# if 0 /* This functionality is offered through reading the port (vmmdevFastRequestIrqAck). Leaving it here for later. */
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ RT_NOREF(pvUser, Port, cb);
+
+ /*
+ * We only process a limited set of requests here, reflecting the rest down
+     * to ring-3.  So, try to read the whole request into a stack buffer and check
+ * if we can handle it.
+ */
+ union
+ {
+ VMMDevRequestHeader Hdr;
+ VMMDevEvents Ack;
+ } uReq;
+ RT_ZERO(uReq);
+
+ VBOXSTRICTRC rcStrict;
+ if (pThis->fu32AdditionsOk)
+ {
+ /* Read it into memory. */
+ uint32_t cbToRead = sizeof(uReq); /* (Adjust to stay within a page if we support more than ack requests.) */
+ rcStrict = PDMDevHlpPhysRead(pDevIns, u32, &uReq, cbToRead);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ /*
+ * Validate the request and check that we want to handle it here.
+ */
+ if ( uReq.Hdr.size >= sizeof(uReq.Hdr)
+ && uReq.Hdr.version == VMMDEV_REQUEST_HEADER_VERSION
+ && ( uReq.Hdr.requestType == VMMDevReq_AcknowledgeEvents
+ && uReq.Hdr.size == sizeof(uReq.Ack)
+ && cbToRead == sizeof(uReq.Ack)
+ && pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
+ )
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Try grab the critical section.
+ */
+ int rc2 = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_WRITE);
+ if (rc2 == VINF_SUCCESS)
+ {
+ /*
+ * Handle the request and write back the result to the guest.
+ */
+ uReq.Hdr.rc = vmmdevReqHandler_AcknowledgeEvents(pThis, &uReq.Hdr);
+
+ rcStrict = PDMDevHlpPhysWrite(pDevIns, u32, &uReq, uReq.Hdr.size);
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ if (rcStrict == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ Log(("vmmdevFastRequestHandler: PDMDevHlpPhysWrite(%#RX32+rc,4) -> %Rrc (%RTbool)\n",
+ u32, VBOXSTRICTRC_VAL(rcStrict), PGM_PHYS_RW_IS_SUCCESS(rcStrict) ));
+ }
+ else
+ {
+ Log(("vmmdevFastRequestHandler: PDMDevHlpPDMCritSectEnter -> %Rrc\n", rc2));
+ rcStrict = rc2;
+ }
+ }
+ else
+ {
+ Log(("vmmdevFastRequestHandler: size=%#x version=%#x requestType=%d (pVMMDevRAM=%p) -> R3\n",
+ uReq.Hdr.size, uReq.Hdr.version, uReq.Hdr.requestType, pThisCC->CTX_SUFF(pVMMDevRAM) ));
+ rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+ }
+ }
+ else
+ Log(("vmmdevFastRequestHandler: PDMDevHlpPhysRead(%#RX32,%#RX32) -> %Rrc\n", u32, cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ else
+ {
+        Log(("vmmdevFastRequestHandler: additions not okay\n"));
+ rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+ }
+
+ return VBOXSTRICTRC_VAL(rcStrict);
+# else
+ RT_NOREF(pDevIns, pvUser, offPort, u32, cb);
+ return VINF_IOM_R3_IOPORT_WRITE;
+# endif
+
+#else /* IN_RING3 */
+ return vmmdevRequestHandler(pDevIns, pvUser, offPort, u32, cb);
+#endif /* IN_RING3 */
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWIN,
+ * Port I/O read handler for IRQ acknowledging and getting pending events (same
+ * as VMMDevReq_AcknowledgeEvents - just faster).}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ Assert(PDMDEVINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+ RT_NOREF(pvUser, offPort);
+
+ /* Only 32-bit accesses. */
+ ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VERR_IOM_IOPORT_UNUSED);
+
+ /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
+ VBOXSTRICTRC rcStrict;
+#ifndef IN_RING3
+ if (pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
+#endif
+ {
+        /* Enter critical section and check that the additions have been properly
+ initialized and that we're not in legacy v1.3 device mode. */
+ rcStrict = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ if ( pThis->fu32AdditionsOk
+ && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ /*
+ * Do the job.
+ *
+ * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
+ */
+ STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
+
+ if (pThis->fNewGuestFilterMaskValid)
+ {
+ pThis->fNewGuestFilterMaskValid = false;
+ pThis->fGuestFilterMask = pThis->fNewGuestFilterMask;
+ }
+
+ *pu32 = pThis->fHostEventFlags & pThis->fGuestFilterMask;
+
+ pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
+ pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+ }
+ else
+ {
+ Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
+ pThis->guestInfo.interfaceVersion));
+ *pu32 = UINT32_MAX;
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ }
+ }
+#ifndef IN_RING3
+ else
+ rcStrict = VINF_IOM_R3_IOPORT_READ;
+#endif
+ return rcStrict;
+}
+
+
+
+#ifdef IN_RING3
+
+/* -=-=-=-=-=- PCI Device -=-=-=-=-=- */
+
+/**
+ * @callback_method_impl{FNPCIIOREGIONMAP,I/O Port Region}
+ */
+static DECLCALLBACK(int) vmmdevIOPortRegionMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ LogFlow(("vmmdevIOPortRegionMap: iRegion=%d GCPhysAddress=%RGp cb=%RGp enmType=%d\n", iRegion, GCPhysAddress, cb, enmType));
+ RT_NOREF(pPciDev, iRegion, cb, enmType);
+
+ Assert(pPciDev == pDevIns->apPciDevs[0]);
+ Assert(enmType == PCI_ADDRESS_SPACE_IO);
+ Assert(iRegion == 0);
+
+ int rc;
+ if (GCPhysAddress != NIL_RTGCPHYS)
+ {
+ AssertMsg(RT_ALIGN(GCPhysAddress, 8) == GCPhysAddress, ("Expected 8 byte alignment. GCPhysAddress=%#RGp\n", GCPhysAddress));
+
+ rc = PDMDevHlpIoPortMap(pDevIns, pThis->hIoPortReq, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = PDMDevHlpIoPortMap(pDevIns, pThis->hIoPortFast, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ else
+ {
+ rc = PDMDevHlpIoPortUnmap(pDevIns, pThis->hIoPortReq);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = PDMDevHlpIoPortUnmap(pDevIns, pThis->hIoPortFast);
+ AssertLogRelRCReturn(rc, rc);
+ }
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNPCIIOREGIONMAP,VMMDev heap (MMIO2)}
+ */
+static DECLCALLBACK(int) vmmdevMmio2HeapRegionMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
+{
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+    LogFlow(("vmmdevMmio2HeapRegionMap: iRegion=%d GCPhysAddress=%RGp cb=%RGp enmType=%d\n", iRegion, GCPhysAddress, cb, enmType));
+ RT_NOREF(cb, pPciDev);
+
+ Assert(pPciDev == pDevIns->apPciDevs[0]);
+ AssertReturn(iRegion == 2, VERR_INTERNAL_ERROR_2);
+ AssertReturn(enmType == PCI_ADDRESS_SPACE_MEM_PREFETCH, VERR_INTERNAL_ERROR_3);
+ Assert(pThisCC->pVMMDevHeapR3 != NULL);
+
+ int rc;
+ if (GCPhysAddress != NIL_RTGCPHYS)
+ {
+ rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, GCPhysAddress, pThisCC->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ AssertRC(rc);
+ }
+ else
+ {
+ rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThisCC->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ AssertRCStmt(rc, rc = VINF_SUCCESS);
+ }
+
+ return rc;
+}
+
+
+/* -=-=-=-=-=- Backdoor Logging and Time Sync. -=-=-=-=-=- */
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT, Backdoor Logging.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevBackdoorLog(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ RT_NOREF(pvUser, offPort);
+ Assert(offPort == 0);
+
+ if (!pThis->fBackdoorLogDisabled && cb == 1)
+ {
+
+ /* The raw version. */
+ switch (u32)
+ {
+ case '\r': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <return>\n")); break;
+ case '\n': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <newline>\n")); break;
+ case '\t': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <tab>\n")); break;
+ default: LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: %c (%02x)\n", u32, u32)); break;
+ }
+
+ /* The readable, buffered version. */
+ uint32_t offMsg = RT_MIN(pThis->offMsg, sizeof(pThis->szMsg) - 1);
+ if (u32 == '\n' || u32 == '\r')
+ {
+ pThis->szMsg[offMsg] = '\0';
+ if (offMsg)
+ LogRelIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR, ("VMMDev: Guest Log: %.*s\n", offMsg, pThis->szMsg));
+ pThis->offMsg = 0;
+ }
+ else
+ {
+ if (offMsg >= sizeof(pThis->szMsg) - 1)
+ {
+ pThis->szMsg[sizeof(pThis->szMsg) - 1] = '\0';
+ LogRelIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR,
+ ("VMMDev: Guest Log: %.*s\n", sizeof(pThis->szMsg) - 1, pThis->szMsg));
+ offMsg = 0;
+ }
+ pThis->szMsg[offMsg++] = (char )u32;
+ pThis->szMsg[offMsg] = '\0';
+ pThis->offMsg = offMsg;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT, Alternative time synchronization.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevAltTimeSyncWrite(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+ RT_NOREF(pvUser, offPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ if (cb == 4)
+ {
+ /* Selects high (0) or low (1) DWORD. The high has to be read first. */
+ switch (u32)
+ {
+ case 0:
+ pThis->fTimesyncBackdoorLo = false;
+ break;
+ case 1:
+ pThis->fTimesyncBackdoorLo = true;
+ break;
+ default:
+ Log(("vmmdevAltTimeSyncWrite: Invalid access cb=%#x u32=%#x\n", cb, u32));
+ break;
+ }
+ }
+ else
+ Log(("vmmdevAltTimeSyncWrite: Invalid access cb=%#x u32=%#x\n", cb, u32));
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWIN, Alternative time synchronization.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevAltTimeSyncRead(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
+{
+ RT_NOREF(pvUser, offPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ VBOXSTRICTRC rc;
+ if (cb == 4)
+ {
+ if (pThis->fTimesyncBackdoorLo)
+ *pu32 = (uint32_t)pThis->msLatchedHostTime;
+ else
+ {
+ /* Reading the high dword gets and saves the current time. */
+ RTTIMESPEC Now;
+ pThis->msLatchedHostTime = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pDevIns, &Now));
+ *pu32 = (uint32_t)(pThis->msLatchedHostTime >> 32);
+ }
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ Log(("vmmdevAltTimeSyncRead: Invalid access cb=%#x\n", cb));
+ rc = VERR_IOM_IOPORT_UNUSED;
+ }
+ return rc;
+}
+
+#endif /* VMMDEV_WITH_ALT_TIMESYNC */
+
+
+/* -=-=-=-=-=- IBase -=-=-=-=-=- */
+
+/**
+ * @interface_method_impl{PDMIBASE,pfnQueryInterface}
+ */
+static DECLCALLBACK(void *) vmmdevPortQueryInterface(PPDMIBASE pInterface, const char *pszIID)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IBase);
+
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIVMMDEVPORT, &pThisCC->IPort);
+#ifdef VBOX_WITH_HGCM
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIHGCMPORT, &pThisCC->IHGCMPort);
+#endif
+ /* Currently only for shared folders. */
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->SharedFolders.ILeds);
+ return NULL;
+}
+
+
+/* -=-=-=-=-=- ILeds -=-=-=-=-=- */
+
+/**
+ * Gets the pointer to the status LED of a unit.
+ *
+ * @returns VBox status code.
+ * @param pInterface Pointer to the interface structure containing the called function pointer.
+ * @param iLUN The unit which status LED we desire.
+ * @param ppLed Where to store the LED pointer.
+ */
+static DECLCALLBACK(int) vmmdevQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, SharedFolders.ILeds);
+ if (iLUN == 0) /* LUN 0 is shared folders */
+ {
+ *ppLed = &pThisCC->SharedFolders.Led;
+ return VINF_SUCCESS;
+ }
+ return VERR_PDM_LUN_NOT_FOUND;
+}
+
+
+/* -=-=-=-=-=- PDMIVMMDEVPORT (VMMDEV::IPort) -=-=-=-=-=- */
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnQueryAbsoluteMouse}
+ */
+static DECLCALLBACK(int) vmmdevIPort_QueryAbsoluteMouse(PPDMIVMMDEVPORT pInterface, int32_t *pxAbs, int32_t *pyAbs)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
+
+ /** @todo at the first sign of trouble in this area, just enter the critsect.
+     * As indicated by the comment below, the atomic reads serve no real purpose
+     * here since we can assume cache coherency protocols and int32_t alignment
+     * rules make sure we won't see a half-written value. */
+ if (pxAbs)
+ *pxAbs = ASMAtomicReadS32(&pThis->xMouseAbs); /* why the atomic read? */
+ if (pyAbs)
+ *pyAbs = ASMAtomicReadS32(&pThis->yMouseAbs);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetAbsoluteMouse}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetAbsoluteMouse(PPDMIVMMDEVPORT pInterface, int32_t xAbs, int32_t yAbs,
+ int32_t dz, int32_t dw, uint32_t fButtons)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+ if ( pThis->xMouseAbs != xAbs
+ || pThis->yMouseAbs != yAbs
+ || dz
+ || dw
+ || pThis->fMouseButtons != fButtons)
+ {
+        Log2(("vmmdevIPort_SetAbsoluteMouse: setting absolute position to x = %d, y = %d, z = %d, w = %d, fButtons = 0x%x\n",
+ xAbs, yAbs, dz, dw, fButtons));
+
+ pThis->xMouseAbs = xAbs;
+ pThis->yMouseAbs = yAbs;
+ pThis->dzMouse = dz;
+ pThis->dwMouse = dw;
+ pThis->fMouseButtons = fButtons;
+
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnQueryMouseCapabilities}
+ */
+static DECLCALLBACK(int) vmmdevIPort_QueryMouseCapabilities(PPDMIVMMDEVPORT pInterface, uint32_t *pfCapabilities)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
+ AssertPtrReturn(pfCapabilities, VERR_INVALID_PARAMETER);
+
+ *pfCapabilities = pThis->fMouseCapabilities;
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnUpdateMouseCapabilities}
+ */
+static DECLCALLBACK(int)
+vmmdevIPort_UpdateMouseCapabilities(PPDMIVMMDEVPORT pInterface, uint32_t fCapsAdded, uint32_t fCapsRemoved)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+ uint32_t fOldCaps = pThis->fMouseCapabilities;
+ pThis->fMouseCapabilities &= ~(fCapsRemoved & VMMDEV_MOUSE_HOST_MASK);
+ pThis->fMouseCapabilities |= (fCapsAdded & VMMDEV_MOUSE_HOST_MASK)
+ | VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR
+ | VMMDEV_MOUSE_HOST_USES_FULL_STATE_PROTOCOL;
+ bool fNotify = fOldCaps != pThis->fMouseCapabilities;
+
+ LogRelFlow(("VMMDev: vmmdevIPort_UpdateMouseCapabilities: fCapsAdded=0x%x, fCapsRemoved=0x%x, fNotify=%RTbool\n", fCapsAdded,
+ fCapsRemoved, fNotify));
+
+ if (fNotify)
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED);
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
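+/**
+ * Checks whether a new display definition matches the one last reported.
+ *
+ * The origin, width, height and bpp fields are considered equal when the new
+ * definition does not specify them (the corresponding VMMDEV_DISPLAY_XXX flag
+ * is clear) or when the old definition specifies them with the same values.
+ * The DISABLED and PRIMARY flags must match exactly.
+ *
+ * @returns true if the definitions are considered equal, false otherwise.
+ * @param   pNew    The new display definition.
+ * @param   pOld    The previously reported display definition.
+ */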
+static bool vmmdevIsMonitorDefEqual(VMMDevDisplayDef const *pNew, VMMDevDisplayDef const *pOld)
+{
+ bool fEqual = pNew->idDisplay == pOld->idDisplay;
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN) /* No change. */
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN) /* Old value exists and */
+ && pNew->xOrigin == pOld->xOrigin /* the old is equal to the new. */
+ && pNew->yOrigin == pOld->yOrigin));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_CX)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_CX)
+ && pNew->cx == pOld->cx));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_CY)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_CY)
+ && pNew->cy == pOld->cy));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_BPP)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_BPP)
+ && pNew->cBitsPerPixel == pOld->cBitsPerPixel));
+
+ fEqual = fEqual && ( RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_DISABLED)
+ == RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_DISABLED));
+
+ fEqual = fEqual && ( RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_PRIMARY)
+ == RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_PRIMARY));
+
+ return fEqual;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnRequestDisplayChange}
+ */
+static DECLCALLBACK(int)
+vmmdevIPort_RequestDisplayChange(PPDMIVMMDEVPORT pInterface, uint32_t cDisplays, VMMDevDisplayDef const *paDisplays, bool fForce, bool fMayNotify)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int rc = VINF_SUCCESS;
+ bool fNotifyGuest = false;
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+ uint32_t i;
+ for (i = 0; i < cDisplays; ++i)
+ {
+ VMMDevDisplayDef const *p = &paDisplays[i];
+
+ /* Either one display definition is provided or the display id must be equal to the array index. */
+ AssertBreakStmt(cDisplays == 1 || p->idDisplay == i, rc = VERR_INVALID_PARAMETER);
+ AssertBreakStmt(p->idDisplay < RT_ELEMENTS(pThis->displayChangeData.aRequests), rc = VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[p->idDisplay];
+
+ VMMDevDisplayDef const *pLastRead = &pRequest->lastReadDisplayChangeRequest;
+
+ /* Verify that the new resolution is different and that guest does not yet know about it. */
+ bool const fDifferentResolution = fForce || !vmmdevIsMonitorDefEqual(p, pLastRead);
+
+ LogFunc(("same=%d. New: %dx%d, cBits=%d, id=%d. Old: %dx%d, cBits=%d, id=%d. @%d,%d, Enabled=%d, ChangeOrigin=%d\n",
+ !fDifferentResolution, p->cx, p->cy, p->cBitsPerPixel, p->idDisplay,
+ pLastRead->cx, pLastRead->cy, pLastRead->cBitsPerPixel, pLastRead->idDisplay,
+ p->xOrigin, p->yOrigin,
+ !RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_DISABLED),
+ RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN)));
+
+ /* We could validate the information here but hey, the guest can do that as well! */
+ pRequest->displayChangeRequest = *p;
+ pRequest->fPending = fDifferentResolution && fMayNotify;
+
+ fNotifyGuest = fNotifyGuest || fDifferentResolution;
+ }
+
+ if (RT_SUCCESS(rc) && fMayNotify)
+ {
+ if (fNotifyGuest)
+ {
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); ++i)
+ {
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[i];
+ if (pRequest->fPending)
+ {
+ VMMDevDisplayDef const *p = &pRequest->displayChangeRequest;
+ LogRel(("VMMDev: SetVideoModeHint: Got a video mode hint (%dx%dx%d)@(%dx%d),(%d;%d) at %d\n",
+ p->cx, p->cy, p->cBitsPerPixel, p->xOrigin, p->yOrigin,
+ !RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_DISABLED),
+ RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN), i));
+ }
+ }
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ }
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return rc;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnRequestSeamlessChange}
+ */
+static DECLCALLBACK(int) vmmdevIPort_RequestSeamlessChange(PPDMIVMMDEVPORT pInterface, bool fEnabled)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+    /* Verify that the new seamless mode setting is different and that the guest does not yet know about it. */
+ bool fSameMode = (pThis->fLastSeamlessEnabled == fEnabled);
+
+ Log(("vmmdevIPort_RequestSeamlessChange: same=%d. new=%d\n", fSameMode, fEnabled));
+
+ if (!fSameMode)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->fSeamlessEnabled = fEnabled;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetMemoryBalloon}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetMemoryBalloon(PPDMIVMMDEVPORT pInterface, uint32_t cMbBalloon)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+    /* Verify that the new balloon size is different and that the guest does not yet know about it. */
+ Log(("vmmdevIPort_SetMemoryBalloon: old=%u new=%u\n", pThis->cMbMemoryBalloonLast, cMbBalloon));
+ if (pThis->cMbMemoryBalloonLast != cMbBalloon)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->cMbMemoryBalloon = cMbBalloon;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_BALLOON_CHANGE_REQUEST);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnVRDPChange}
+ */
+static DECLCALLBACK(int) vmmdevIPort_VRDPChange(PPDMIVMMDEVPORT pInterface, bool fVRDPEnabled, uint32_t uVRDPExperienceLevel)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+ bool fSame = (pThis->fVRDPEnabled == fVRDPEnabled);
+
+ Log(("vmmdevIPort_VRDPChange: old=%d. new=%d\n", pThis->fVRDPEnabled, fVRDPEnabled));
+
+ if (!fSame)
+ {
+ pThis->fVRDPEnabled = fVRDPEnabled;
+ pThis->uVRDPExperienceLevel = uVRDPExperienceLevel;
+
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_VRDP);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetStatisticsInterval}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetStatisticsInterval(PPDMIVMMDEVPORT pInterface, uint32_t cSecsStatInterval)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+    /* Verify that the new statistics interval is different and that the guest does not yet know about it. */
+ bool fSame = (pThis->cSecsLastStatInterval == cSecsStatInterval);
+
+ Log(("vmmdevIPort_SetStatisticsInterval: old=%d. new=%d\n", pThis->cSecsLastStatInterval, cSecsStatInterval));
+
+ if (!fSame)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->cSecsStatInterval = cSecsStatInterval;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetCredentials}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetCredentials(PPDMIVMMDEVPORT pInterface, const char *pszUsername,
+ const char *pszPassword, const char *pszDomain, uint32_t fFlags)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+
+ AssertReturn(fFlags & (VMMDEV_SETCREDENTIALS_GUESTLOGON | VMMDEV_SETCREDENTIALS_JUDGE), VERR_INVALID_PARAMETER);
+ size_t const cchUsername = strlen(pszUsername);
+ AssertReturn(cchUsername < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+ size_t const cchPassword = strlen(pszPassword);
+ AssertReturn(cchPassword < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+ size_t const cchDomain = strlen(pszDomain);
+ AssertReturn(cchDomain < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+
+ VMMDEVCREDS *pCredentials = pThisCC->pCredentials;
+ AssertPtrReturn(pCredentials, VERR_NOT_SUPPORTED);
+
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rcLock, rcLock);
+
+ /*
+ * Logon mode
+ */
+ if (fFlags & VMMDEV_SETCREDENTIALS_GUESTLOGON)
+ {
+ /* memorize the data */
+ memcpy(pCredentials->Logon.szUserName, pszUsername, cchUsername);
+        pCredentials->Logon.szUserName[cchUsername] = '\0';
+ memcpy(pCredentials->Logon.szPassword, pszPassword, cchPassword);
+ pCredentials->Logon.szPassword[cchPassword] = '\0';
+ memcpy(pCredentials->Logon.szDomain, pszDomain, cchDomain);
+ pCredentials->Logon.szDomain[cchDomain] = '\0';
+ pCredentials->Logon.fAllowInteractiveLogon = !(fFlags & VMMDEV_SETCREDENTIALS_NOLOCALLOGON);
+ }
+ /*
+ * Credentials verification mode?
+ */
+ else
+ {
+ /* memorize the data */
+ memcpy(pCredentials->Judge.szUserName, pszUsername, cchUsername);
+ pCredentials->Judge.szUserName[cchUsername] = '\0';
+ memcpy(pCredentials->Judge.szPassword, pszPassword, cchPassword);
+ pCredentials->Judge.szPassword[cchPassword] = '\0';
+ memcpy(pCredentials->Judge.szDomain, pszDomain, cchDomain);
+ pCredentials->Judge.szDomain[cchDomain] = '\0';
+
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_JUDGE_CREDENTIALS);
+ }
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnVBVAChange}
+ *
+ * Notification from the Display. Especially useful when acceleration is
+ * disabled after a video mode change.
+ */
+static DECLCALLBACK(void) vmmdevIPort_VBVAChange(PPDMIVMMDEVPORT pInterface, bool fEnabled)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
+ Log(("vmmdevIPort_VBVAChange: fEnabled = %d\n", fEnabled));
+
+ /* Only used by saved state, which I guess is why we don't bother with locking here. */
+ pThis->u32VideoAccelEnabled = fEnabled;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnCpuHotUnplug}
+ */
+static DECLCALLBACK(int) vmmdevIPort_CpuHotUnplug(PPDMIVMMDEVPORT pInterface, uint32_t idCpuCore, uint32_t idCpuPackage)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+
+ Log(("vmmdevIPort_CpuHotUnplug: idCpuCore=%u idCpuPackage=%u\n", idCpuCore, idCpuPackage));
+
+ int rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rc, rc);
+
+ if (pThis->fCpuHotPlugEventsEnabled)
+ {
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_Unplug;
+ pThis->idCpuCore = idCpuCore;
+ pThis->idCpuPackage = idCpuPackage;
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_CPU_HOTPLUG);
+ }
+ else
+ rc = VERR_VMMDEV_CPU_HOTPLUG_NOT_MONITORED_BY_GUEST;
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return rc;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnCpuHotPlug}
+ */
+static DECLCALLBACK(int) vmmdevIPort_CpuHotPlug(PPDMIVMMDEVPORT pInterface, uint32_t idCpuCore, uint32_t idCpuPackage)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+
+ Log(("vmmdevCpuPlug: idCpuCore=%u idCpuPackage=%u\n", idCpuCore, idCpuPackage));
+
+ int rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rc, rc);
+
+ if (pThis->fCpuHotPlugEventsEnabled)
+ {
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_Plug;
+ pThis->idCpuCore = idCpuCore;
+ pThis->idCpuPackage = idCpuPackage;
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_CPU_HOTPLUG);
+ }
+ else
+ rc = VERR_VMMDEV_CPU_HOTPLUG_NOT_MONITORED_BY_GUEST;
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return rc;
+}
+
+
+/* -=-=-=-=-=- Saved State -=-=-=-=-=- */
+
+/**
+ * @callback_method_impl{FNSSMDEVLIVEEXEC}
+ */
+static DECLCALLBACK(int) vmmdevLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ RT_NOREF(uPass);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+
+ pHlp->pfnSSMPutBool(pSSM, pThis->fGetHostTimeDisabled);
+ pHlp->pfnSSMPutBool(pSSM, pThis->fBackdoorLogDisabled);
+ pHlp->pfnSSMPutBool(pSSM, pThis->fKeepCredentials);
+ pHlp->pfnSSMPutBool(pSSM, pThis->fHeapEnabled);
+
+ return VINF_SSM_DONT_CALL_AGAIN;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMDEVSAVEEXEC}
+ */
+static DECLCALLBACK(int) vmmdevSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ int rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ AssertRCReturn(rc, rc);
+
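+    /* Write the same config fields as the live pass first, then the runtime state. */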
+ vmmdevLiveExec(pDevIns, pSSM, SSM_PASS_FINAL);
+
+ pHlp->pfnSSMPutU32(pSSM, 0 /*was pThis->hypervisorSize, which was always zero*/);
+ pHlp->pfnSSMPutU32(pSSM, pThis->fMouseCapabilities);
+ pHlp->pfnSSMPutS32(pSSM, pThis->xMouseAbs);
+ pHlp->pfnSSMPutS32(pSSM, pThis->yMouseAbs);
+ pHlp->pfnSSMPutS32(pSSM, pThis->dzMouse);
+ pHlp->pfnSSMPutS32(pSSM, pThis->dwMouse);
+ pHlp->pfnSSMPutU32(pSSM, pThis->fMouseButtons);
+
+ pHlp->pfnSSMPutBool(pSSM, pThis->fNewGuestFilterMaskValid);
+ pHlp->pfnSSMPutU32(pSSM, pThis->fNewGuestFilterMask);
+ pHlp->pfnSSMPutU32(pSSM, pThis->fGuestFilterMask);
+ pHlp->pfnSSMPutU32(pSSM, pThis->fHostEventFlags);
+ /* The following is not strictly necessary as PGM restores MMIO2, keeping it for historical reasons. */
+ pHlp->pfnSSMPutMem(pSSM, &pThisCC->pVMMDevRAMR3->V, sizeof(pThisCC->pVMMDevRAMR3->V));
+
+ pHlp->pfnSSMPutMem(pSSM, &pThis->guestInfo, sizeof(pThis->guestInfo));
+ pHlp->pfnSSMPutU32(pSSM, pThis->fu32AdditionsOk);
+ pHlp->pfnSSMPutU32(pSSM, pThis->u32VideoAccelEnabled);
+ pHlp->pfnSSMPutBool(pSSM, pThis->displayChangeData.fGuestSentChangeEventAck);
+
+ pHlp->pfnSSMPutU32(pSSM, pThis->fGuestCaps);
+
+#ifdef VBOX_WITH_HGCM
+ vmmdevR3HgcmSaveState(pThisCC, pSSM);
+#endif /* VBOX_WITH_HGCM */
+
+ pHlp->pfnSSMPutU32(pSSM, pThis->fHostCursorRequested);
+
+ pHlp->pfnSSMPutU32(pSSM, pThis->guestInfo2.uFullVersion);
+ pHlp->pfnSSMPutU32(pSSM, pThis->guestInfo2.uRevision);
+ pHlp->pfnSSMPutU32(pSSM, pThis->guestInfo2.fFeatures);
+ pHlp->pfnSSMPutStrZ(pSSM, pThis->guestInfo2.szName);
+ pHlp->pfnSSMPutU32(pSSM, pThis->cFacilityStatuses);
+ for (uint32_t i = 0; i < pThis->cFacilityStatuses; i++)
+ {
+ pHlp->pfnSSMPutU32(pSSM, pThis->aFacilityStatuses[i].enmFacility);
+ pHlp->pfnSSMPutU32(pSSM, pThis->aFacilityStatuses[i].fFlags);
+ pHlp->pfnSSMPutU16(pSSM, (uint16_t)pThis->aFacilityStatuses[i].enmStatus);
+ pHlp->pfnSSMPutS64(pSSM, RTTimeSpecGetNano(&pThis->aFacilityStatuses[i].TimeSpecTS));
+ }
+
+ /* Heartbeat: */
+ pHlp->pfnSSMPutBool(pSSM, pThis->fHeartbeatActive);
+ pHlp->pfnSSMPutBool(pSSM, pThis->fFlatlined);
+ pHlp->pfnSSMPutU64(pSSM, pThis->nsLastHeartbeatTS);
+ PDMDevHlpTimerSave(pDevIns, pThis->hFlatlinedTimer, pSSM);
+
+ pHlp->pfnSSMPutStructEx(pSSM, &pThis->displayChangeData, sizeof(pThis->displayChangeData), 0,
+ g_aSSMDISPLAYCHANGEDATAStateFields, NULL);
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNSSMDEVLOADEXEC}
+ */
+static DECLCALLBACK(int) vmmdevLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ int rc;
+
+ if ( uVersion > VMMDEV_SAVED_STATE_VERSION
+ || uVersion < 6)
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+
+ /* config */
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_VBOX_30)
+ {
+ bool f;
+ rc = pHlp->pfnSSMGetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fGetHostTimeDisabled != f)
+ LogRel(("VMMDev: Config mismatch - fGetHostTimeDisabled: config=%RTbool saved=%RTbool\n", pThis->fGetHostTimeDisabled, f));
+
+ rc = pHlp->pfnSSMGetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fBackdoorLogDisabled != f)
+ LogRel(("VMMDev: Config mismatch - fBackdoorLogDisabled: config=%RTbool saved=%RTbool\n", pThis->fBackdoorLogDisabled, f));
+
+ rc = pHlp->pfnSSMGetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fKeepCredentials != f)
+ return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fKeepCredentials: config=%RTbool saved=%RTbool"),
+ pThis->fKeepCredentials, f);
+ rc = pHlp->pfnSSMGetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fHeapEnabled != f)
+ return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fHeapEnabled: config=%RTbool saved=%RTbool"),
+ pThis->fHeapEnabled, f);
+ }
+
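+    /* Only the config fields above are written by the live passes; the runtime
+       state below is restored in the final pass only. */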
+ if (uPass != SSM_PASS_FINAL)
+ return VINF_SUCCESS;
+
+ /* state */
+ uint32_t uIgn;
+ pHlp->pfnSSMGetU32(pSSM, &uIgn);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fMouseCapabilities);
+ pHlp->pfnSSMGetS32(pSSM, &pThis->xMouseAbs);
+ pHlp->pfnSSMGetS32(pSSM, &pThis->yMouseAbs);
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA)
+ {
+ pHlp->pfnSSMGetS32(pSSM, &pThis->dzMouse);
+ pHlp->pfnSSMGetS32(pSSM, &pThis->dwMouse);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fMouseButtons);
+ }
+
+ pHlp->pfnSSMGetBool(pSSM, &pThis->fNewGuestFilterMaskValid);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fNewGuestFilterMask);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fGuestFilterMask);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fHostEventFlags);
+
+ //pHlp->pfnSSMGetBool(pSSM, &pThis->pVMMDevRAMR3->fHaveEvents);
+ // here be dragons (probably)
+ pHlp->pfnSSMGetMem(pSSM, &pThisCC->pVMMDevRAMR3->V, sizeof(pThisCC->pVMMDevRAMR3->V));
+
+ pHlp->pfnSSMGetMem(pSSM, &pThis->guestInfo, sizeof(pThis->guestInfo));
+ pHlp->pfnSSMGetU32(pSSM, &pThis->fu32AdditionsOk);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->u32VideoAccelEnabled);
+ if (uVersion > 10)
+ pHlp->pfnSSMGetBool(pSSM, &pThis->displayChangeData.fGuestSentChangeEventAck);
+
+ rc = pHlp->pfnSSMGetU32(pSSM, &pThis->fGuestCaps);
+
+ /* Attributes which were temporarily introduced in r30072 */
+ if (uVersion == 7)
+ {
+ uint32_t temp;
+ pHlp->pfnSSMGetU32(pSSM, &temp);
+ rc = pHlp->pfnSSMGetU32(pSSM, &temp);
+ }
+ AssertRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_HGCM
+ rc = vmmdevR3HgcmLoadState(pDevIns, pThis, pThisCC, pSSM, uVersion);
+ AssertRCReturn(rc, rc);
+#endif /* VBOX_WITH_HGCM */
+
+ if (uVersion >= 10)
+ rc = pHlp->pfnSSMGetU32(pSSM, &pThis->fHostCursorRequested);
+ AssertRCReturn(rc, rc);
+
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_MISSING_GUEST_INFO_2)
+ {
+ pHlp->pfnSSMGetU32(pSSM, &pThis->guestInfo2.uFullVersion);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->guestInfo2.uRevision);
+ pHlp->pfnSSMGetU32(pSSM, &pThis->guestInfo2.fFeatures);
+ rc = pHlp->pfnSSMGetStrZ(pSSM, &pThis->guestInfo2.szName[0], sizeof(pThis->guestInfo2.szName));
+ AssertRCReturn(rc, rc);
+ }
+
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_MISSING_FACILITY_STATUSES)
+ {
+ uint32_t cFacilityStatuses;
+ rc = pHlp->pfnSSMGetU32(pSSM, &cFacilityStatuses);
+ AssertRCReturn(rc, rc);
+
+ for (uint32_t i = 0; i < cFacilityStatuses; i++)
+ {
+ uint32_t uFacility, fFlags;
+ uint16_t uStatus;
+ int64_t iTimeStampNano;
+
+ pHlp->pfnSSMGetU32(pSSM, &uFacility);
+ pHlp->pfnSSMGetU32(pSSM, &fFlags);
+ pHlp->pfnSSMGetU16(pSSM, &uStatus);
+ rc = pHlp->pfnSSMGetS64(pSSM, &iTimeStampNano);
+ AssertRCReturn(rc, rc);
+
+ PVMMDEVFACILITYSTATUSENTRY pEntry = vmmdevGetFacilityStatusEntry(pThis, (VBoxGuestFacilityType)uFacility);
+ AssertLogRelMsgReturn(pEntry,
+ ("VMMDev: Ran out of entries restoring the guest facility statuses. Saved state has %u.\n", cFacilityStatuses),
+ VERR_OUT_OF_RESOURCES);
+ pEntry->enmStatus = (VBoxGuestFacilityStatus)uStatus;
+ pEntry->fFlags = fFlags;
+ RTTimeSpecSetNano(&pEntry->TimeSpecTS, iTimeStampNano);
+ }
+ }
+
+ /*
+ * Heartbeat.
+ */
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HEARTBEAT)
+ {
+ pHlp->pfnSSMGetBoolV(pSSM, &pThis->fHeartbeatActive);
+ pHlp->pfnSSMGetBoolV(pSSM, &pThis->fFlatlined);
+ pHlp->pfnSSMGetU64V(pSSM, &pThis->nsLastHeartbeatTS);
+ rc = PDMDevHlpTimerLoad(pDevIns, pThis->hFlatlinedTimer, pSSM);
+ AssertRCReturn(rc, rc);
+ if (pThis->fFlatlined)
+ LogRel(("vmmdevLoadState: Guest has flatlined. Last heartbeat %'RU64 ns before state was saved.\n",
+ PDMDevHlpTimerGetNano(pDevIns, pThis->hFlatlinedTimer) - pThis->nsLastHeartbeatTS));
+ }
+
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_DISPLAY_CHANGE_DATA)
+ {
+ pHlp->pfnSSMGetStructEx(pSSM, &pThis->displayChangeData, sizeof(pThis->displayChangeData), 0,
+ g_aSSMDISPLAYCHANGEDATAStateFields, NULL);
+ }
+
+ /*
+ * On a resume, we send the capabilities changed message so
+ * that listeners can sync their state again
+ */
+ Log(("vmmdevLoadState: capabilities changed (%x), informing connector\n", pThis->fMouseCapabilities));
+ if (pThisCC->pDrv)
+ {
+ pThisCC->pDrv->pfnUpdateMouseCapabilities(pThisCC->pDrv, pThis->fMouseCapabilities);
+ if (uVersion >= 10)
+ pThisCC->pDrv->pfnUpdatePointerShape(pThisCC->pDrv,
+ /*fVisible=*/!!pThis->fHostCursorRequested,
+ /*fAlpha=*/false,
+ /*xHot=*/0, /*yHot=*/0,
+ /*cx=*/0, /*cy=*/0,
+ /*pvShape=*/NULL);
+ }
+
+ if (pThis->fu32AdditionsOk)
+ {
+ vmmdevLogGuestOsInfo(&pThis->guestInfo);
+ if (pThisCC->pDrv)
+ {
+ if (pThis->guestInfo2.uFullVersion && pThisCC->pDrv->pfnUpdateGuestInfo2)
+ pThisCC->pDrv->pfnUpdateGuestInfo2(pThisCC->pDrv, pThis->guestInfo2.uFullVersion, pThis->guestInfo2.szName,
+ pThis->guestInfo2.uRevision, pThis->guestInfo2.fFeatures);
+ if (pThisCC->pDrv->pfnUpdateGuestInfo)
+ pThisCC->pDrv->pfnUpdateGuestInfo(pThisCC->pDrv, &pThis->guestInfo);
+
+ if (pThisCC->pDrv->pfnUpdateGuestStatus)
+ {
+ for (uint32_t i = 0; i < pThis->cFacilityStatuses; i++) /* ascending order! */
+ if ( pThis->aFacilityStatuses[i].enmStatus != VBoxGuestFacilityStatus_Inactive
+ || !pThis->aFacilityStatuses[i].fFixed)
+ pThisCC->pDrv->pfnUpdateGuestStatus(pThisCC->pDrv,
+ pThis->aFacilityStatuses[i].enmFacility,
+ (uint16_t)pThis->aFacilityStatuses[i].enmStatus,
+ pThis->aFacilityStatuses[i].fFlags,
+ &pThis->aFacilityStatuses[i].TimeSpecTS);
+ }
+ }
+ }
+ if (pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestCapabilities)
+ pThisCC->pDrv->pfnUpdateGuestCapabilities(pThisCC->pDrv, pThis->fGuestCaps);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Load state done callback. Notify guest of restore event.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pSSM The handle to the saved state.
+ */
+static DECLCALLBACK(int) vmmdevLoadStateDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ RT_NOREF(pSSM);
+
+#ifdef VBOX_WITH_HGCM
+ int rc = vmmdevR3HgcmLoadStateDone(pDevIns, pThis, pThisCC);
+ AssertLogRelRCReturn(rc, rc);
+#endif /* VBOX_WITH_HGCM */
+
+ /* Reestablish the acceleration status. */
+ if ( pThis->u32VideoAccelEnabled
+ && pThisCC->pDrv)
+ pThisCC->pDrv->pfnVideoAccelEnable(pThisCC->pDrv, !!pThis->u32VideoAccelEnabled, &pThisCC->pVMMDevRAMR3->vbvaMemory);
+
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_RESTORED);
+
+ return VINF_SUCCESS;
+}
+
+
+/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
+
+/**
+ * (Re-)initializes the MMIO2 data.
+ *
+ * @param pThisCC The VMMDev ring-3 instance data.
+ */
+static void vmmdevInitRam(PVMMDEVCC pThisCC)
+{
+ memset(pThisCC->pVMMDevRAMR3, 0, sizeof(VMMDevMemory));
+ pThisCC->pVMMDevRAMR3->u32Size = sizeof(VMMDevMemory);
+ pThisCC->pVMMDevRAMR3->u32Version = VMMDEV_MEMORY_VERSION;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnReset}
+ */
+static DECLCALLBACK(void) vmmdevReset(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
+
+ /*
+ * Reset the mouse integration feature bits
+ */
+ if (pThis->fMouseCapabilities & VMMDEV_MOUSE_GUEST_MASK)
+ {
+ pThis->fMouseCapabilities &= ~VMMDEV_MOUSE_GUEST_MASK;
+ /* notify the connector */
+ Log(("vmmdevReset: capabilities changed (%x), informing connector\n", pThis->fMouseCapabilities));
+ pThisCC->pDrv->pfnUpdateMouseCapabilities(pThisCC->pDrv, pThis->fMouseCapabilities);
+ }
+ pThis->fHostCursorRequested = false;
+
+ /* re-initialize the VMMDev memory */
+ if (pThisCC->pVMMDevRAMR3)
+ vmmdevInitRam(pThisCC);
+
+ /* credentials have to go away (by default) */
+ VMMDEVCREDS *pCredentials = pThisCC->pCredentials;
+ if (pCredentials)
+ {
+ if (!pThis->fKeepCredentials)
+ {
+ RT_ZERO(pCredentials->Logon.szUserName);
+ RT_ZERO(pCredentials->Logon.szPassword);
+ RT_ZERO(pCredentials->Logon.szDomain);
+ }
+ RT_ZERO(pCredentials->Judge.szUserName);
+ RT_ZERO(pCredentials->Judge.szPassword);
+ RT_ZERO(pCredentials->Judge.szDomain);
+ }
+
+ /* Reset means that additions will report again. */
+ const bool fVersionChanged = pThis->fu32AdditionsOk
+ || pThis->guestInfo.interfaceVersion
+ || pThis->guestInfo.osType != VBOXOSTYPE_Unknown;
+ if (fVersionChanged)
+ Log(("vmmdevReset: fu32AdditionsOk=%d additionsVersion=%x osType=%#x\n",
+ pThis->fu32AdditionsOk, pThis->guestInfo.interfaceVersion, pThis->guestInfo.osType));
+ pThis->fu32AdditionsOk = false;
+    memset(&pThis->guestInfo, 0, sizeof(pThis->guestInfo));
+ RT_ZERO(pThis->guestInfo2);
+ const bool fCapsChanged = pThis->fGuestCaps != 0; /* Report transition to 0. */
+ pThis->fGuestCaps = 0;
+
+ /* Clear facilities. No need to tell Main as it will get a
+ pfnUpdateGuestInfo callback. */
+ RTTIMESPEC TimeStampNow;
+ RTTimeNow(&TimeStampNow);
+ uint32_t iFacility = pThis->cFacilityStatuses;
+ while (iFacility-- > 0)
+ {
+ pThis->aFacilityStatuses[iFacility].enmStatus = VBoxGuestFacilityStatus_Inactive;
+ pThis->aFacilityStatuses[iFacility].TimeSpecTS = TimeStampNow;
+ }
+
+ /* clear pending display change request. */
+ for (unsigned i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[i];
+ memset(&pRequest->lastReadDisplayChangeRequest, 0, sizeof(pRequest->lastReadDisplayChangeRequest));
+ pRequest->lastReadDisplayChangeRequest.fDisplayFlags = VMMDEV_DISPLAY_DISABLED;
+ pRequest->lastReadDisplayChangeRequest.idDisplay = i;
+ }
+ pThis->displayChangeData.iCurrentMonitor = 0;
+ pThis->displayChangeData.fGuestSentChangeEventAck = false;
+
+ /* disable seamless mode */
+ pThis->fLastSeamlessEnabled = false;
+
+    /* disable memory ballooning */
+ pThis->cMbMemoryBalloonLast = 0;
+
+    /* disable statistics updating */
+ pThis->cSecsLastStatInterval = 0;
+
+#ifdef VBOX_WITH_HGCM
+ /* Clear the "HGCM event enabled" flag so the event can be automatically reenabled. */
+ pThisCC->u32HGCMEnabled = 0;
+#endif
+
+ /*
+     * Deactivate heartbeat.
+ */
+ if (pThis->fHeartbeatActive)
+ {
+ PDMDevHlpTimerStop(pDevIns, pThis->hFlatlinedTimer);
+ pThis->fFlatlined = false;
+ pThis->fHeartbeatActive = true;
+ }
+
+ /*
+ * Clear the event variables.
+ *
+     * XXX By design we should NOT clear pThis->fHostEventFlags, so that host events
+     * do not depend on guest resets.  However, the pending event flags have actually
+     * been cleared here for ages, so we merely exempt the events which really need to
+     * survive the reset from the clearing.  See xtracker 5767.
+ */
+ pThis->fHostEventFlags &= VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
+ pThis->fGuestFilterMask = 0;
+ pThis->fNewGuestFilterMask = 0;
+ pThis->fNewGuestFilterMaskValid = 0;
+
+ /*
+ * Call the update functions as required.
+ */
+ if (fVersionChanged && pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestInfo)
+ pThisCC->pDrv->pfnUpdateGuestInfo(pThisCC->pDrv, &pThis->guestInfo);
+ if (fCapsChanged && pThisCC->pDrv && pThisCC->pDrv->pfnUpdateGuestCapabilities)
+ pThisCC->pDrv->pfnUpdateGuestCapabilities(pThisCC->pDrv, pThis->fGuestCaps);
+
+ /*
+ * Generate a unique session id for this VM; it will be changed for each start, reset or restore.
+ * This can be used for restore detection inside the guest.
+ */
+ pThis->idSession = ASMReadTSC();
+
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+}
+
+
+#ifdef VBOX_WITH_RAW_MODE_KEEP
+/**
+ * @interface_method_impl{PDMDEVREG,pfnRelocate}
+ */
+static DECLCALLBACK(void) vmmdevRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
+{
+ if (offDelta)
+ {
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ LogFlow(("vmmdevRelocate: offDelta=%RGv\n", offDelta));
+
+ if (pThis->pVMMDevRAMRC)
+ pThis->pVMMDevRAMRC += offDelta;
+ pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
+ }
+}
+#endif
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnDestruct}
+ */
+static DECLCALLBACK(int) vmmdevDestruct(PPDMDEVINS pDevIns)
+{
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+
+ /*
+ * Wipe and free the credentials.
+ */
+ VMMDEVCREDS *pCredentials = pThisCC->pCredentials;
+ pThisCC->pCredentials = NULL;
+ if (pCredentials)
+ {
+ if (pThisCC->fSaferCredentials)
+ RTMemSaferFree(pCredentials, sizeof(*pCredentials));
+ else
+ {
+ RTMemWipeThoroughly(pCredentials, sizeof(*pCredentials), 10);
+ RTMemFree(pCredentials);
+ }
+ }
+
+#ifdef VBOX_WITH_HGCM
+ /*
+ * Everything HGCM.
+ */
+ vmmdevR3HgcmDestroy(pDevIns, PDMDEVINS_2_DATA(pDevIns, PVMMDEV), pThisCC);
+#endif
+
+ /*
+ * Free the request buffers.
+ */
+ for (uint32_t iCpu = 0; iCpu < RT_ELEMENTS(pThisCC->apReqBufs); iCpu++)
+ {
+ RTMemPageFree(pThisCC->apReqBufs[iCpu], _4K);
+ pThisCC->apReqBufs[iCpu] = NULL;
+ }
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /*
+ * Clean up the testing device.
+ */
+ vmmdevR3TestingTerminate(pDevIns);
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnConstruct}
+ */
+static DECLCALLBACK(int) vmmdevConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
+{
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+ int rc;
+
+ Assert(iInstance == 0);
+ RT_NOREF(iInstance);
+
+ /*
+ * Initialize data (most of it anyway).
+ */
+ pThisCC->pDevIns = pDevIns;
+
+ pThis->hFlatlinedTimer = NIL_TMTIMERHANDLE;
+ pThis->hIoPortBackdoorLog = NIL_IOMIOPORTHANDLE;
+ pThis->hIoPortAltTimesync = NIL_IOMIOPORTHANDLE;
+ pThis->hIoPortReq = NIL_IOMIOPORTHANDLE;
+ pThis->hIoPortFast = NIL_IOMIOPORTHANDLE;
+ pThis->hMmio2VMMDevRAM = NIL_PGMMMIO2HANDLE;
+ pThis->hMmio2Heap = NIL_PGMMMIO2HANDLE;
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ pThis->hIoPortTesting = NIL_IOMIOPORTHANDLE;
+ pThis->hMmioTesting = NIL_IOMMMIOHANDLE;
+ pThis->hTestingLockEvt = NIL_SUPSEMEVENT;
+#endif
+
+ PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
+ PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
+
+ /* PCI vendor, just a free bogus value */
+ PDMPciDevSetVendorId(pPciDev, 0x80ee);
+ /* device ID */
+ PDMPciDevSetDeviceId(pPciDev, 0xcafe);
+ /* class sub code (other type of system peripheral) */
+ PDMPciDevSetClassSub(pPciDev, 0x80);
+ /* class base code (base system peripheral) */
+ PDMPciDevSetClassBase(pPciDev, 0x08);
+ /* header type */
+ PDMPciDevSetHeaderType(pPciDev, 0x00);
+ /* interrupt on pin 0 */
+ PDMPciDevSetInterruptPin(pPciDev, 0x01);
+
+ RTTIMESPEC TimeStampNow;
+ RTTimeNow(&TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxGuestDriver, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxService, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxTrayClient, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_Seamless, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_Graphics, true /*fFixed*/, &TimeStampNow);
+ Assert(pThis->cFacilityStatuses == 5);
+
+ /* disable all screens (no better hints known yet). */
+ /** @todo r=klaus need a way to represent "no hint known" */
+ for (unsigned i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[i];
+ pRequest->displayChangeRequest.fDisplayFlags = VMMDEV_DISPLAY_DISABLED;
+ pRequest->displayChangeRequest.idDisplay = i;
+ pRequest->lastReadDisplayChangeRequest.fDisplayFlags = VMMDEV_DISPLAY_DISABLED;
+ pRequest->lastReadDisplayChangeRequest.idDisplay = i;
+ }
+
+ /*
+ * Interfaces
+ */
+ /* IBase */
+ pThisCC->IBase.pfnQueryInterface = vmmdevPortQueryInterface;
+
+ /* VMMDev port */
+ pThisCC->IPort.pfnQueryAbsoluteMouse = vmmdevIPort_QueryAbsoluteMouse;
+ pThisCC->IPort.pfnSetAbsoluteMouse = vmmdevIPort_SetAbsoluteMouse ;
+ pThisCC->IPort.pfnQueryMouseCapabilities = vmmdevIPort_QueryMouseCapabilities;
+ pThisCC->IPort.pfnUpdateMouseCapabilities = vmmdevIPort_UpdateMouseCapabilities;
+ pThisCC->IPort.pfnRequestDisplayChange = vmmdevIPort_RequestDisplayChange;
+ pThisCC->IPort.pfnSetCredentials = vmmdevIPort_SetCredentials;
+ pThisCC->IPort.pfnVBVAChange = vmmdevIPort_VBVAChange;
+ pThisCC->IPort.pfnRequestSeamlessChange = vmmdevIPort_RequestSeamlessChange;
+ pThisCC->IPort.pfnSetMemoryBalloon = vmmdevIPort_SetMemoryBalloon;
+ pThisCC->IPort.pfnSetStatisticsInterval = vmmdevIPort_SetStatisticsInterval;
+ pThisCC->IPort.pfnVRDPChange = vmmdevIPort_VRDPChange;
+ pThisCC->IPort.pfnCpuHotUnplug = vmmdevIPort_CpuHotUnplug;
+ pThisCC->IPort.pfnCpuHotPlug = vmmdevIPort_CpuHotPlug;
+
+ /* Shared folder LED */
+ pThisCC->SharedFolders.Led.u32Magic = PDMLED_MAGIC;
+ pThisCC->SharedFolders.ILeds.pfnQueryStatusLed = vmmdevQueryStatusLed;
+
+#ifdef VBOX_WITH_HGCM
+ /* HGCM port */
+ pThisCC->IHGCMPort.pfnCompleted = hgcmR3Completed;
+ pThisCC->IHGCMPort.pfnIsCmdRestored = hgcmR3IsCmdRestored;
+ pThisCC->IHGCMPort.pfnIsCmdCancelled = hgcmR3IsCmdCancelled;
+ pThisCC->IHGCMPort.pfnGetRequestor = hgcmR3GetRequestor;
+ pThisCC->IHGCMPort.pfnGetVMMDevSessionId = hgcmR3GetVMMDevSessionId;
+#endif
+
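+    /* Allocate the credentials buffer, preferring the hardened (safer) allocator
+       and falling back to the regular heap if that is not available. */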
+ pThisCC->pCredentials = (VMMDEVCREDS *)RTMemSaferAllocZ(sizeof(*pThisCC->pCredentials));
+ if (pThisCC->pCredentials)
+ pThisCC->fSaferCredentials = true;
+ else
+ {
+ pThisCC->pCredentials = (VMMDEVCREDS *)RTMemAllocZ(sizeof(*pThisCC->pCredentials));
+ AssertReturn(pThisCC->pCredentials, VERR_NO_MEMORY);
+ }
+
+
+ /*
+ * Validate and read the configuration.
+ */
+ PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
+ "AllowGuestToSaveState|"
+ "GetHostTimeDisabled|"
+ "BackdoorLogDisabled|"
+ "KeepCredentials|"
+ "HeapEnabled|"
+ "GuestCoreDumpEnabled|"
+ "GuestCoreDumpDir|"
+ "GuestCoreDumpCount|"
+ "HeartbeatInterval|"
+ "HeartbeatTimeout|"
+ "TestingEnabled|"
+ "TestingMMIO|"
+ "TestingXmlOutputFile|"
+ "TestingCfgDword0|"
+ "TestingCfgDword1|"
+ "TestingCfgDword2|"
+ "TestingCfgDword3|"
+ "TestingCfgDword4|"
+ "TestingCfgDword5|"
+ "TestingCfgDword6|"
+ "TestingCfgDword7|"
+ "TestingCfgDword8|"
+ "TestingCfgDword9|"
+ "HGCMHeapBudgetDefault|"
+ "HGCMHeapBudgetLegacy|"
+ "HGCMHeapBudgetVBoxGuest|"
+ "HGCMHeapBudgetOtherDrv|"
+ "HGCMHeapBudgetRoot|"
+ "HGCMHeapBudgetSystem|"
+ "HGCMHeapBudgetReserved1|"
+ "HGCMHeapBudgetUser|"
+ "HGCMHeapBudgetGuest"
+ ,
+ "");
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "AllowGuestToSaveState", &pThis->fAllowGuestToSaveState, true);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"AllowGuestToSaveState\" as a boolean"));
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GetHostTimeDisabled", &pThis->fGetHostTimeDisabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GetHostTimeDisabled\" as a boolean"));
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "BackdoorLogDisabled", &pThis->fBackdoorLogDisabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"BackdoorLogDisabled\" as a boolean"));
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "KeepCredentials", &pThis->fKeepCredentials, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"KeepCredentials\" as a boolean"));
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, true);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeapEnabled\" as a boolean"));
+
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GuestCoreDumpEnabled", &pThis->fGuestCoreDumpEnabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"GuestCoreDumpEnabled\" as a boolean"));
+
+ char *pszGuestCoreDumpDir = NULL;
+ rc = pHlp->pfnCFGMQueryStringAllocDef(pCfg, "GuestCoreDumpDir", &pszGuestCoreDumpDir, "");
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"GuestCoreDumpDir\" as a string"));
+
+ RTStrCopy(pThis->szGuestCoreDumpDir, sizeof(pThis->szGuestCoreDumpDir), pszGuestCoreDumpDir);
+ PDMDevHlpMMHeapFree(pDevIns, pszGuestCoreDumpDir);
+
+ rc = pHlp->pfnCFGMQueryU32Def(pCfg, "GuestCoreDumpCount", &pThis->cGuestCoreDumps, 3);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GuestCoreDumpCount\" as a 32-bit unsigned integer"));
+
+ rc = pHlp->pfnCFGMQueryU64Def(pCfg, "HeartbeatInterval", &pThis->cNsHeartbeatInterval, VMMDEV_HEARTBEAT_DEFAULT_INTERVAL);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeartbeatInterval\" as a 64-bit unsigned integer"));
+ if (pThis->cNsHeartbeatInterval < RT_NS_100MS / 2)
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Heartbeat interval \"HeartbeatInterval\" too small"));
+
+ rc = pHlp->pfnCFGMQueryU64Def(pCfg, "HeartbeatTimeout", &pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval * 2);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeartbeatTimeout\" as a 64-bit unsigned integer"));
+ if (pThis->cNsHeartbeatTimeout < RT_NS_100MS)
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" too small"));
+ if (pThis->cNsHeartbeatTimeout <= pThis->cNsHeartbeatInterval + RT_NS_10MS)
+        return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                   N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" value (%'RU64 ns) is too close to the interval (%'RU64 ns)"),
+                                   pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval);
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TestingEnabled", &pThis->fTestingEnabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"TestingEnabled\" as a boolean"));
+ rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TestingMMIO", &pThis->fTestingMMIO, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"TestingMMIO\" as a boolean"));
+ rc = pHlp->pfnCFGMQueryStringAllocDef(pCfg, "TestingXmlOutputFile", &pThisCC->pszTestingXmlOutput, NULL);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"TestingXmlOutputFile\" as a string"));
+
+ for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32TestingCfgDwords); i++)
+ {
+ char szName[32];
+ RTStrPrintf(szName, sizeof(szName), "TestingCfgDword%u", i);
+ rc = pHlp->pfnCFGMQueryU32Def(pCfg, szName, &pThis->au32TestingCfgDwords[i], 0);
+ if (RT_FAILURE(rc))
+            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                       N_("Configuration error: Failed querying \"%s\" as a 32-bit unsigned integer"), szName);
+ }
+
+
+ /** @todo image-to-load-filename? */
+#endif
+
+#ifdef VBOX_WITH_HGCM
+ /*
+ * Heap budgets for HGCM requestor categories. Take the available host
+ * memory as a rough hint of how much we can handle.
+ */
+ uint64_t cbDefaultBudget = 0;
+ if (RT_FAILURE(RTSystemQueryTotalRam(&cbDefaultBudget)))
+ cbDefaultBudget = 8 * _1G64;
+ LogFunc(("RTSystemQueryTotalRam -> %'RU64 (%RX64)\n", cbDefaultBudget, cbDefaultBudget));
+# if ARCH_BITS == 32
+ cbDefaultBudget = RT_MIN(cbDefaultBudget, _512M);
+# endif
+ cbDefaultBudget /= 8; /* One eighth of physical memory ... */
+ cbDefaultBudget /= RT_ELEMENTS(pThisCC->aHgcmAcc); /* over 3 accounting categories. (8GiB -> 341MiB) */
+ cbDefaultBudget = RT_MIN(cbDefaultBudget, _1G); /* max 1024MiB */
+ cbDefaultBudget = RT_MAX(cbDefaultBudget, _32M); /* min 32MiB */
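+    /* For example, a host reporting 16 GiB of RAM yields 16 GiB / 8 = 2 GiB, split over
+     * the 3 accounting categories = ~683 MiB each, which already lies within the
+     * [32 MiB, 1 GiB] clamp applied above. */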
+ rc = pHlp->pfnCFGMQueryU64Def(pCfg, "HGCMHeapBudgetDefault", &cbDefaultBudget, cbDefaultBudget);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"HGCMHeapBudgetDefault\" as a 64-bit unsigned integer"));
+
+ LogRel(("VMMDev: cbDefaultBudget: %'RU64 (%RX64)\n", cbDefaultBudget, cbDefaultBudget));
+ static const struct { const char *pszName; unsigned idx; } s_aCfgHeapBudget[] =
+ {
+ { "HGCMHeapBudgetKernel", VMMDEV_HGCM_CATEGORY_KERNEL },
+ { "HGCMHeapBudgetRoot", VMMDEV_HGCM_CATEGORY_ROOT },
+ { "HGCMHeapBudgetUser", VMMDEV_HGCM_CATEGORY_USER },
+ };
+ AssertCompile(RT_ELEMENTS(s_aCfgHeapBudget) == RT_ELEMENTS(pThisCC->aHgcmAcc));
+ for (uintptr_t i = 0; i < RT_ELEMENTS(s_aCfgHeapBudget); i++)
+ {
+ uintptr_t const idx = s_aCfgHeapBudget[i].idx;
+ rc = pHlp->pfnCFGMQueryU64Def(pCfg, s_aCfgHeapBudget[i].pszName,
+ &pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig, cbDefaultBudget);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Configuration error: Failed querying \"%s\" as a 64-bit unsigned integer"),
+ s_aCfgHeapBudget[i].pszName);
+ pThisCC->aHgcmAcc[idx].cbHeapBudget = pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig;
+ if (pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig != cbDefaultBudget)
+ LogRel(("VMMDev: %s: %'RU64 (%#RX64)\n", s_aCfgHeapBudget[i].pszName,
+ pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig, pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig));
+
+ const char * const pszCatName = &s_aCfgHeapBudget[i].pszName[sizeof("HGCMHeapBudget") - 1];
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->aHgcmAcc[idx].cbHeapBudget, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "Currently available budget", "HGCM-%s/BudgetAvailable", pszCatName);
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "Configured budget", "HGCM-%s/BudgetConfig", pszCatName);
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->aHgcmAcc[idx].StateMsgHeapUsage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES_PER_CALL, "Message heap usage", "HGCM-%s/MessageHeapUsage", pszCatName);
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->aHgcmAcc[idx].StatBudgetOverruns, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
+ STAMUNIT_BYTES, "Budget overruns and allocation errors", "HGCM-%s/BudgetOverruns", pszCatName);
+ }
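+    /* The statistics registered above appear under names like HGCM-Kernel/BudgetAvailable,
+     * HGCM-Root/BudgetConfig and HGCM-User/MessageHeapUsage. */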
+#endif
+
+ /*
+     * Get the guest RAM size.
+ */
+ pThis->cbGuestRAM = PDMDevHlpMMPhysGetRamSize(pDevIns);
+
+ /*
+ * We do our own locking entirely. So, install NOP critsect for the device
+ * and create our own critsect for use where it really matters (++).
+ */
+ rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+ AssertRCReturn(rc, rc);
+ rc = PDMDevHlpCritSectInit(pDevIns, &pThis->CritSect, RT_SRC_POS, "VMMDev#%u", iInstance);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the backdoor logging port
+ */
+ rc = PDMDevHlpIoPortCreateAndMap(pDevIns, RTLOG_DEBUG_PORT, 1, vmmdevBackdoorLog, NULL /*pfnIn*/,
+ "VMMDev backdoor logging", NULL, &pThis->hIoPortBackdoorLog);
+ AssertRCReturn(rc, rc);
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+ /*
+ * Alternative timesync source.
+ *
+     * This was originally added for creating a simple time sync service in an
+ * OpenBSD guest without requiring VBoxGuest and VBoxService to be ported
+ * first. We keep it in case it comes in handy.
+ */
+ rc = PDMDevHlpIoPortCreateAndMap(pDevIns, 0x505, 1, vmmdevAltTimeSyncWrite, vmmdevAltTimeSyncRead,
+ "VMMDev timesync backdoor", NULL /*paExtDescs*/, &pThis->hIoPortAltTimesync);
+ AssertRCReturn(rc, rc);
+#endif
+
+ /*
+ * Register the PCI device.
+ */
+ rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (pPciDev->uDevFn != 32 || iInstance != 0)
+ Log(("!!WARNING!!: pThis->PciDev.uDevFn=%d (ignore if testcase or no started by Main)\n", pPciDev->uDevFn));
+
+ /*
+ * The I/O ports, PCI region #0. This has two separate I/O port mappings in it,
+ * so we have to do it via the mapper callback.
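+     * (The two mappings are the normal request port and the fast R0/RC request port
+     * created below.)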
+ */
+ rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(0, 0), vmmdevRequestHandler, NULL /*pfnIn*/,
+ NULL /*pvUser*/, "VMMDev Request Handler", NULL, &pThis->hIoPortReq);
+ AssertRCReturn(rc, rc);
+
+ rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(1, 0), vmmdevFastRequestHandler,
+ vmmdevFastRequestIrqAck, NULL, "VMMDev Fast R0/RC Requests", NULL /*pvUser*/, &pThis->hIoPortFast);
+ AssertRCReturn(rc, rc);
+
+ rc = PDMDevHlpPCIIORegionRegisterIoCustom(pDevIns, 0, 0x20, vmmdevIOPortRegionMap);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Allocate and initialize the MMIO2 memory, PCI region #1.
+ */
+ rc = PDMDevHlpPCIIORegionCreateMmio2(pDevIns, 1 /*iPciRegion*/, VMMDEV_RAM_SIZE, PCI_ADDRESS_SPACE_MEM, "VMMDev",
+ (void **)&pThisCC->pVMMDevRAMR3, &pThis->hMmio2VMMDevRAM);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Failed to create the %u (%#x) byte MMIO2 region for the VMM device"),
+ VMMDEV_RAM_SIZE, VMMDEV_RAM_SIZE);
+ vmmdevInitRam(pThisCC);
+
+ /*
+ * The MMIO2 heap (used for real-mode VT-x trickery), PCI region #2.
+ */
+ if (pThis->fHeapEnabled)
+ {
+ rc = PDMDevHlpPCIIORegionCreateMmio2Ex(pDevIns, 2 /*iPciRegion*/, VMMDEV_HEAP_SIZE, PCI_ADDRESS_SPACE_MEM_PREFETCH,
+ 0 /*fFlags*/, vmmdevMmio2HeapRegionMap, "VMMDev Heap",
+ (void **)&pThisCC->pVMMDevHeapR3, &pThis->hMmio2Heap);
+ if (RT_FAILURE(rc))
+            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                       N_("Failed to create the %u (%#x) byte MMIO2 heap region for the VMM device"),
+                                       VMMDEV_HEAP_SIZE, VMMDEV_HEAP_SIZE);
+
+ /* Register the memory area with PDM so HM can access it before it's mapped. */
+ rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThisCC->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /*
+ * Initialize testing.
+ */
+ rc = vmmdevR3TestingInitialize(pDevIns);
+ if (RT_FAILURE(rc))
+ return rc;
+#endif
+
+ /*
+ * Get the corresponding connector interface
+ */
+ rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "VMM Driver Port");
+ if (RT_SUCCESS(rc))
+ {
+ pThisCC->pDrv = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIVMMDEVCONNECTOR);
+ AssertMsgReturn(pThisCC->pDrv, ("LUN #0 doesn't have a VMMDev connector interface!\n"), VERR_PDM_MISSING_INTERFACE);
+#ifdef VBOX_WITH_HGCM
+ pThisCC->pHGCMDrv = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIHGCMCONNECTOR);
+ if (!pThisCC->pHGCMDrv)
+ {
+ Log(("LUN #0 doesn't have a HGCM connector interface, HGCM is not supported. rc=%Rrc\n", rc));
+ /* this is not actually an error, just means that there is no support for HGCM */
+ }
+#endif
+ /* Query the initial balloon size. */
+ AssertPtr(pThisCC->pDrv->pfnQueryBalloonSize);
+ rc = pThisCC->pDrv->pfnQueryBalloonSize(pThisCC->pDrv, &pThis->cMbMemoryBalloon);
+ AssertRC(rc);
+
+ Log(("Initial balloon size %x\n", pThis->cMbMemoryBalloon));
+ }
+ else if (rc == VERR_PDM_NO_ATTACHED_DRIVER)
+ {
+ Log(("%s/%d: warning: no driver attached to LUN #0!\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ rc = VINF_SUCCESS;
+ }
+ else
+ AssertMsgFailedReturn(("Failed to attach LUN #0! rc=%Rrc\n", rc), rc);
+
+ /*
+ * Attach status driver for shared folders (optional).
+ */
+ PPDMIBASE pBase;
+ rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
+ if (RT_SUCCESS(rc))
+ pThisCC->SharedFolders.pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
+ else if (rc != VERR_PDM_NO_ATTACHED_DRIVER)
+ {
+ AssertMsgFailed(("Failed to attach to status driver. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Register saved state and init the HGCM CmdList critsect.
+ */
+ rc = PDMDevHlpSSMRegisterEx(pDevIns, VMMDEV_SAVED_STATE_VERSION, sizeof(*pThis), NULL,
+ NULL, vmmdevLiveExec, NULL,
+ NULL, vmmdevSaveExec, NULL,
+ NULL, vmmdevLoadExec, vmmdevLoadStateDone);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create heartbeat checking timer.
+ */
+ rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, vmmDevHeartbeatFlatlinedTimer, pThis,
+ TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "Heartbeat flatlined", &pThis->hFlatlinedTimer);
+ AssertRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_HGCM
+ rc = vmmdevR3HgcmInit(pThisCC);
+ AssertRCReturn(rc, rc);
+#endif
+
+ /*
+ * In this version of VirtualBox the GUI checks whether "needs host cursor"
+ * changes.
+ */
+ pThis->fMouseCapabilities |= VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR;
+
+ /*
+ * In this version of VirtualBox full mouse state can be provided to the guest over DevVMM.
+ */
+ pThis->fMouseCapabilities |= VMMDEV_MOUSE_HOST_USES_FULL_STATE_PROTOCOL;
+
+ /*
+ * Statistics.
+ */
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMemBalloonChunks, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Memory balloon size", "BalloonChunks");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Fast IRQ acknowledgments handled in ring-3.", "FastIrqAckR3");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckRZ, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Fast IRQ acknowledgments handled in ring-0 or raw-mode.", "FastIrqAckRZ");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatSlowIrqAck, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Slow IRQ acknowledgments (old style).", "SlowIrqAck");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatReqBufAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times a larger request buffer was required.", "LargeReqBufAllocs");
+#ifdef VBOX_WITH_HGCM
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatHgcmCmdArrival, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling HGCM call arrival processing", "/HGCM/MsgArrival");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatHgcmCmdCompletion, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling HGCM call completion processing", "/HGCM/MsgCompletion");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatHgcmCmdTotal, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling whole HGCM call.", "/HGCM/MsgTotal");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatHgcmLargeCmdAllocs,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times the allocation cache could not be used.", "/HGCM/LargeCmdAllocs");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThisCC->StatHgcmFailedPageListLocking,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times no-bounce page list locking failed.", "/HGCM/FailedPageListLocking");
+#endif
+
+ /*
+ * Generate a unique session id for this VM; it will be changed for each
+ * start, reset or restore. This can be used for restore detection inside
+ * the guest.
+ */
+ pThis->idSession = ASMReadTSC();
+ return rc;
+}
+
+#else /* !IN_RING3 */
+
+/**
+ * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
+ */
+static DECLCALLBACK(int) vmmdevRZConstruct(PPDMDEVINS pDevIns)
+{
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+
+ int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+ AssertRCReturn(rc, rc);
+
+#if 0
+ rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortBackdoorLog, vmmdevBackdoorLog, NULL /*pfnIn*/, NULL /*pvUser*/);
+ AssertRCReturn(rc, rc);
+#endif
+#if 0 && defined(VMMDEV_WITH_ALT_TIMESYNC)
+ rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortAltTimesync, vmmdevAltTimeSyncWrite, vmmdevAltTimeSyncRead, NULL);
+ AssertRCReturn(rc, rc);
+#endif
+
+ /*
+     * We map the first page of the VMMDevRAM into raw-mode and kernel contexts so that we
+     * can handle interrupt acknowledge requests in a more timely manner (vmmdevFastRequestIrqAck).
+ */
+ rc = PDMDevHlpMmio2SetUpContext(pDevIns, pThis->hMmio2VMMDevRAM, 0, GUEST_PAGE_SIZE, (void **)&pThisCC->CTX_SUFF(pVMMDevRAM));
+ AssertRCReturn(rc, rc);
+
+ rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortFast, vmmdevFastRequestHandler, vmmdevFastRequestIrqAck, NULL);
+ AssertRCReturn(rc, rc);
+
+# ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /*
+ * Initialize testing.
+ */
+ rc = vmmdevRZTestingInitialize(pDevIns);
+ AssertRCReturn(rc, rc);
+# endif
+
+ return VINF_SUCCESS;
+}
+
+#endif /* !IN_RING3 */
+
+/**
+ * The device registration structure.
+ */
+extern "C" const PDMDEVREG g_DeviceVMMDev =
+{
+ /* .u32Version = */ PDM_DEVREG_VERSION,
+ /* .uReserved0 = */ 0,
+ /* .szName = */ "VMMDev",
+ /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
+ /* .fClass = */ PDM_DEVREG_CLASS_VMM_DEV,
+ /* .cMaxInstances = */ 1,
+ /* .uSharedVersion = */ 42,
+ /* .cbInstanceShared = */ sizeof(VMMDEV),
+ /* .cbInstanceCC = */ sizeof(VMMDEVCC),
+ /* .cbInstanceRC = */ sizeof(VMMDEVRC),
+ /* .cMaxPciDevices = */ 1,
+ /* .cMaxMsixVectors = */ 0,
+ /* .pszDescription = */ "VirtualBox VMM Device\n",
+#if defined(IN_RING3)
+ /* .pszRCMod = */ "VBoxDDRC.rc",
+ /* .pszR0Mod = */ "VBoxDDR0.r0",
+ /* .pfnConstruct = */ vmmdevConstruct,
+ /* .pfnDestruct = */ vmmdevDestruct,
+# ifdef VBOX_WITH_RAW_MODE_KEEP
+ /* .pfnRelocate = */ vmmdevRelocate,
+# else
+ /* .pfnRelocate = */ NULL,
+# endif
+ /* .pfnMemSetup = */ NULL,
+ /* .pfnPowerOn = */ NULL,
+ /* .pfnReset = */ vmmdevReset,
+ /* .pfnSuspend = */ NULL,
+ /* .pfnResume = */ NULL,
+ /* .pfnAttach = */ NULL,
+ /* .pfnDetach = */ NULL,
+ /* .pfnQueryInterface = */ NULL,
+ /* .pfnInitComplete = */ NULL,
+ /* .pfnPowerOff = */ NULL,
+ /* .pfnSoftReset = */ NULL,
+ /* .pfnReserved0 = */ NULL,
+ /* .pfnReserved1 = */ NULL,
+ /* .pfnReserved2 = */ NULL,
+ /* .pfnReserved3 = */ NULL,
+ /* .pfnReserved4 = */ NULL,
+ /* .pfnReserved5 = */ NULL,
+ /* .pfnReserved6 = */ NULL,
+ /* .pfnReserved7 = */ NULL,
+#elif defined(IN_RING0)
+ /* .pfnEarlyConstruct = */ NULL,
+ /* .pfnConstruct = */ vmmdevRZConstruct,
+ /* .pfnDestruct = */ NULL,
+ /* .pfnFinalDestruct = */ NULL,
+ /* .pfnRequest = */ NULL,
+ /* .pfnReserved0 = */ NULL,
+ /* .pfnReserved1 = */ NULL,
+ /* .pfnReserved2 = */ NULL,
+ /* .pfnReserved3 = */ NULL,
+ /* .pfnReserved4 = */ NULL,
+ /* .pfnReserved5 = */ NULL,
+ /* .pfnReserved6 = */ NULL,
+ /* .pfnReserved7 = */ NULL,
+#elif defined(IN_RC)
+ /* .pfnConstruct = */ vmmdevRZConstruct,
+ /* .pfnReserved0 = */ NULL,
+ /* .pfnReserved1 = */ NULL,
+ /* .pfnReserved2 = */ NULL,
+ /* .pfnReserved3 = */ NULL,
+ /* .pfnReserved4 = */ NULL,
+ /* .pfnReserved5 = */ NULL,
+ /* .pfnReserved6 = */ NULL,
+ /* .pfnReserved7 = */ NULL,
+#else
+# error "Not in IN_RING3, IN_RING0 or IN_RC!"
+#endif
+ /* .u32VersionEnd = */ PDM_DEVREG_VERSION
+};
+
+#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
diff --git a/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp b/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
new file mode 100644
index 00000000..c4ee37bb
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
@@ -0,0 +1,2776 @@
+/* $Id: VMMDevHGCM.cpp $ */
+/** @file
+ * VMMDev - HGCM - Host-Guest Communication Manager Device.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+
+#include <VBox/AssertGuest.h>
+#include <VBox/err.h>
+#include <VBox/hgcmsvc.h>
+#include <VBox/log.h>
+
+#include "VMMDevHGCM.h"
+
+#ifdef DEBUG
+# define VBOX_STRICT_GUEST
+#endif
+
+#ifdef VBOX_WITH_DTRACE
+# include "dtrace/VBoxDD.h"
+#else
+# define VBOXDD_HGCMCALL_ENTER(a,b,c,d) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_REQ(a,b) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_EMT(a,b) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_DONE(a,b,c,d) do { } while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+typedef enum VBOXHGCMCMDTYPE
+{
+ VBOXHGCMCMDTYPE_LOADSTATE = 0,
+ VBOXHGCMCMDTYPE_CONNECT,
+ VBOXHGCMCMDTYPE_DISCONNECT,
+ VBOXHGCMCMDTYPE_CALL,
+ VBOXHGCMCMDTYPE_SizeHack = 0x7fffffff
+} VBOXHGCMCMDTYPE;
+
+/**
+ * Information about a 32 or 64 bit parameter.
+ */
+typedef struct VBOXHGCMPARMVAL
+{
+ /** Actual value. Both 32 and 64 bit is saved here. */
+ uint64_t u64Value;
+
+ /** Offset from the start of the request where the value is stored. */
+ uint32_t offValue;
+
+ /** Size of the value: 4 for 32 bit and 8 for 64 bit. */
+ uint32_t cbValue;
+
+} VBOXHGCMPARMVAL;
+
+/**
+ * Information about a pointer parameter.
+ */
+typedef struct VBOXHGCMPARMPTR
+{
+ /** Size of the buffer described by the pointer parameter. */
+ uint32_t cbData;
+
+/** @todo Save 8 bytes here by putting offFirstPage, cPages, and fu32Direction
+ *        into bitfields like in VBOXHGCMPARMPAGES. */
+ /** Offset in the first physical page of the region. */
+ uint32_t offFirstPage;
+
+ /** How many pages. */
+ uint32_t cPages;
+
+ /** How the buffer should be copied VBOX_HGCM_F_PARM_*. */
+ uint32_t fu32Direction;
+
+ /** Pointer to array of the GC physical addresses for these pages.
+ * It is assumed that the physical address of the locked resident guest page
+ * does not change. */
+ RTGCPHYS *paPages;
+
+ /** For single page requests. */
+ RTGCPHYS GCPhysSinglePage;
+
+} VBOXHGCMPARMPTR;
+
+
+/**
+ * Pages w/o bounce buffering.
+ */
+typedef struct VBOXHGCMPARMPAGES
+{
+ /** The buffer size. */
+ uint32_t cbData;
+ /** Start of buffer offset into the first page. */
+ uint32_t offFirstPage : 12;
+ /** VBOX_HGCM_F_PARM_XXX flags. */
+ uint32_t fFlags : 3;
+ /** Set if we've locked all the pages. */
+ uint32_t fLocked : 1;
+ /** Number of pages. */
+ uint32_t cPages : 16;
+    /** Array of page locks followed by an array of page pointers; the first page
+ * pointer is adjusted by offFirstPage. */
+ PPGMPAGEMAPLOCK paPgLocks;
+} VBOXHGCMPARMPAGES;
+
+/**
+ * Information about a guest HGCM parameter.
+ */
+typedef struct VBOXHGCMGUESTPARM
+{
+ /** The parameter type. */
+ HGCMFunctionParameterType enmType;
+
+ union
+ {
+ VBOXHGCMPARMVAL val;
+ VBOXHGCMPARMPTR ptr;
+ VBOXHGCMPARMPAGES Pages;
+ } u;
+
+} VBOXHGCMGUESTPARM;
+
+typedef struct VBOXHGCMCMD
+{
+ /** Active commands, list is protected by critsectHGCMCmdList. */
+ RTLISTNODE node;
+
+ /** The type of the command (VBOXHGCMCMDTYPE). */
+ uint8_t enmCmdType;
+
+ /** Whether the command was cancelled by the guest. */
+ bool fCancelled;
+
+ /** Set if allocated from the memory cache, clear if heap. */
+ bool fMemCache;
+
+ /** Whether the command was restored from saved state. */
+ bool fRestored : 1;
+ /** Whether this command has a no-bounce page list and needs to be restored
+ * from guest memory the old fashioned way. */
+ bool fRestoreFromGuestMem : 1;
+
+ /** Copy of VMMDevRequestHeader::fRequestor.
+ * @note Only valid if VBOXGSTINFO2_F_REQUESTOR_INFO is set in
+ * VMMDevState.guestInfo2.fFeatures. */
+ uint32_t fRequestor;
+
+ /** GC physical address of the guest request. */
+ RTGCPHYS GCPhys;
+
+ /** Request packet size. */
+ uint32_t cbRequest;
+
+ /** The type of the guest request. */
+ VMMDevRequestType enmRequestType;
+
+ /** Pointer to the locked request, NULL if not locked. */
+ void *pvReqLocked;
+ /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
+ PGMPAGEMAPLOCK ReqMapLock;
+
+ /** The accounting index (into VMMDEVR3::aHgcmAcc). */
+ uint8_t idxHeapAcc;
+ uint8_t abPadding[3];
+ /** The heap cost of this command. */
+ uint32_t cbHeapCost;
+
+ /** The STAM_GET_TS() value when the request arrived. */
+ uint64_t tsArrival;
+ /** The STAM_GET_TS() value when the hgcmR3Completed() is called. */
+ uint64_t tsComplete;
+
+ union
+ {
+ struct
+ {
+ uint32_t u32ClientID;
+ HGCMServiceLocation *pLoc; /**< Allocated after this structure. */
+ } connect;
+
+ struct
+ {
+ uint32_t u32ClientID;
+ } disconnect;
+
+ struct
+ {
+ /* Number of elements in paGuestParms and paHostParms arrays. */
+ uint32_t cParms;
+
+ uint32_t u32ClientID;
+
+ uint32_t u32Function;
+
+ /** Pointer to information about guest parameters in case of a Call request.
+ * Follows this structure in the same memory block.
+ */
+ VBOXHGCMGUESTPARM *paGuestParms;
+
+ /** Pointer to converted host parameters in case of a Call request.
+ * Follows this structure in the same memory block.
+ */
+ VBOXHGCMSVCPARM *paHostParms;
+
+ /* VBOXHGCMGUESTPARM[] */
+ /* VBOXHGCMSVCPARM[] */
+ } call;
+ } u;
+} VBOXHGCMCMD;
+
+
+/**
+ * Version for the memory cache.
+ */
+typedef struct VBOXHGCMCMDCACHED
+{
+ VBOXHGCMCMD Core; /**< 120 */
+ VBOXHGCMGUESTPARM aGuestParms[6]; /**< 40 * 6 = 240 */
+ VBOXHGCMSVCPARM aHostParms[6]; /**< 24 * 6 = 144 */
+} VBOXHGCMCMDCACHED; /**< 120+240+144 = 504 */
+AssertCompile(sizeof(VBOXHGCMCMD) <= 120);
+AssertCompile(sizeof(VBOXHGCMGUESTPARM) <= 40);
+AssertCompile(sizeof(VBOXHGCMSVCPARM) <= 24);
+AssertCompile(sizeof(VBOXHGCMCMDCACHED) <= 512);
+AssertCompile(sizeof(VBOXHGCMCMDCACHED) > sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested);
+
+
+
+DECLINLINE(int) vmmdevR3HgcmCmdListLock(PVMMDEVCC pThisCC)
+{
+ int rc = RTCritSectEnter(&pThisCC->critsectHGCMCmdList);
+ AssertRC(rc);
+ return rc;
+}
+
+DECLINLINE(void) vmmdevR3HgcmCmdListUnlock(PVMMDEVCC pThisCC)
+{
+ int rc = RTCritSectLeave(&pThisCC->critsectHGCMCmdList);
+ AssertRC(rc);
+}
+
+/** Allocate and initialize VBOXHGCMCMD structure for HGCM request.
+ *
+ * @returns Pointer to the command on success, NULL otherwise.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param enmCmdType Type of the command.
+ * @param GCPhys The guest physical address of the HGCM request.
+ * @param cbRequest The size of the HGCM request.
+ * @param cParms Number of HGCM parameters for VBOXHGCMCMDTYPE_CALL command.
+ * @param fRequestor The VMMDevRequestHeader::fRequestor value.
+ */
+static PVBOXHGCMCMD vmmdevR3HgcmCmdAlloc(PVMMDEVCC pThisCC, VBOXHGCMCMDTYPE enmCmdType, RTGCPHYS GCPhys,
+ uint32_t cbRequest, uint32_t cParms, uint32_t fRequestor)
+{
+ /*
+ * Pick the heap accounting category.
+ *
+     * The initial idea was to use what VMMDEV_REQUESTOR_USR_MASK yields directly,
+     * but that would leave several categories mostly unused (DRV, RESERVED1, GUEST).
+     * Better to have fewer categories with more heap available in each.
+ */
+ uintptr_t idxHeapAcc;
+ if (fRequestor != VMMDEV_REQUESTOR_LEGACY)
+ switch (fRequestor & VMMDEV_REQUESTOR_USR_MASK)
+ {
+ case VMMDEV_REQUESTOR_USR_NOT_GIVEN:
+ case VMMDEV_REQUESTOR_USR_DRV:
+ case VMMDEV_REQUESTOR_USR_DRV_OTHER:
+ idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
+ break;
+ case VMMDEV_REQUESTOR_USR_ROOT:
+ case VMMDEV_REQUESTOR_USR_SYSTEM:
+ idxHeapAcc = VMMDEV_HGCM_CATEGORY_ROOT;
+ break;
+ default:
+ AssertFailed(); RT_FALL_THRU();
+ case VMMDEV_REQUESTOR_USR_RESERVED1:
+ case VMMDEV_REQUESTOR_USR_USER:
+ case VMMDEV_REQUESTOR_USR_GUEST:
+ idxHeapAcc = VMMDEV_HGCM_CATEGORY_USER;
+ break;
+ }
+ else
+ idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
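+
+    /* In short: legacy, kernel-mode and driver requestors are charged to the KERNEL
+     * budget, root/system processes to ROOT, and everything else (user, guest,
+     * reserved) to USER. */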
+
+#if 1
+ /*
+     * Try to use the cache.
+ */
+ VBOXHGCMCMDCACHED *pCmdCached;
+ AssertCompile(sizeof(*pCmdCached) >= sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
+ if (cParms <= RT_ELEMENTS(pCmdCached->aGuestParms))
+ {
+ if (sizeof(*pCmdCached) <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
+ {
+ int rc = RTMemCacheAllocEx(pThisCC->hHgcmCmdCache, (void **)&pCmdCached);
+ if (RT_SUCCESS(rc))
+ {
+ RT_ZERO(*pCmdCached);
+ pCmdCached->Core.fMemCache = true;
+ pCmdCached->Core.GCPhys = GCPhys;
+ pCmdCached->Core.cbRequest = cbRequest;
+ pCmdCached->Core.enmCmdType = enmCmdType;
+ pCmdCached->Core.fRequestor = fRequestor;
+ pCmdCached->Core.idxHeapAcc = (uint8_t)idxHeapAcc;
+ pCmdCached->Core.cbHeapCost = sizeof(*pCmdCached);
+ Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#zx (%p)\n",
+ idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, sizeof(*pCmdCached), &pCmdCached->Core));
+ pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= sizeof(*pCmdCached);
+
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pCmdCached->Core.u.call.cParms = cParms;
+ pCmdCached->Core.u.call.paGuestParms = pCmdCached->aGuestParms;
+ pCmdCached->Core.u.call.paHostParms = pCmdCached->aHostParms;
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ pCmdCached->Core.u.connect.pLoc = (HGCMServiceLocation *)(&pCmdCached->Core + 1);
+
+ Assert(!pCmdCached->Core.pvReqLocked);
+
+ Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp)\n", &pCmdCached->Core, enmCmdType, GCPhys));
+ return &pCmdCached->Core;
+ }
+ }
+ else
+ LogFunc(("Heap budget overrun: sizeof(*pCmdCached)=%#zx aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
+ sizeof(*pCmdCached), idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
+ STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
+ return NULL;
+ }
+ STAM_REL_COUNTER_INC(&pThisCC->StatHgcmLargeCmdAllocs);
+
+#else
+ RT_NOREF(pThisCC);
+#endif
+
+ /* Size of required memory buffer. */
+ const uint32_t cbCmd = sizeof(VBOXHGCMCMD) + cParms * (sizeof(VBOXHGCMGUESTPARM) + sizeof(VBOXHGCMSVCPARM))
+ + (enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? sizeof(HGCMServiceLocation) : 0);
+ if (cbCmd <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
+ {
+ PVBOXHGCMCMD pCmd = (PVBOXHGCMCMD)RTMemAllocZ(cbCmd);
+ if (pCmd)
+ {
+ pCmd->enmCmdType = enmCmdType;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+ pCmd->fRequestor = fRequestor;
+ pCmd->idxHeapAcc = (uint8_t)idxHeapAcc;
+ pCmd->cbHeapCost = cbCmd;
+ Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#x (%p)\n", idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, cbCmd, pCmd));
+ pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= cbCmd;
+
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pCmd->u.call.cParms = cParms;
+ if (cParms)
+ {
+ pCmd->u.call.paGuestParms = (VBOXHGCMGUESTPARM *)((uint8_t *)pCmd
+ + sizeof(struct VBOXHGCMCMD));
+ pCmd->u.call.paHostParms = (VBOXHGCMSVCPARM *)((uint8_t *)pCmd->u.call.paGuestParms
+ + cParms * sizeof(VBOXHGCMGUESTPARM));
+ }
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ pCmd->u.connect.pLoc = (HGCMServiceLocation *)(pCmd + 1);
+ }
+ Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp cbCmd=%#x)\n", pCmd, enmCmdType, GCPhys, cbCmd));
+ return pCmd;
+ }
+ STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
+ LogFunc(("Heap budget overrun: cbCmd=%#x aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
+ cbCmd, idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
+ return NULL;
+}
+
+/** Deallocate VBOXHGCMCMD memory.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pCmd Command to deallocate.
+ */
+static void vmmdevR3HgcmCmdFree(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
+{
+ if (pCmd)
+ {
+ Assert( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_LOADSTATE);
+ if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ uint32_t i;
+ for (i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ {
+ Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
+ if (pGuestParm->u.ptr.paPages != &pGuestParm->u.ptr.GCPhysSinglePage)
+ RTMemFree(pGuestParm->u.ptr.paPages);
+ RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
+ }
+ else if (pGuestParm->enmType == VMMDevHGCMParmType_Embedded)
+ {
+ Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
+ RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
+ }
+ else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
+ {
+ Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PAGES);
+ if (pGuestParm->u.Pages.paPgLocks)
+ {
+ if (pGuestParm->u.Pages.fLocked)
+ PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
+ pGuestParm->u.Pages.paPgLocks);
+ RTMemFree(pGuestParm->u.Pages.paPgLocks);
+ pGuestParm->u.Pages.paPgLocks = NULL;
+ }
+ }
+ else
+ Assert(pHostParm->type != VBOX_HGCM_SVC_PARM_PTR && pHostParm->type != VBOX_HGCM_SVC_PARM_PAGES);
+ }
+ }
+
+ if (pCmd->pvReqLocked)
+ {
+ PDMDevHlpPhysReleasePageMappingLock(pDevIns, &pCmd->ReqMapLock);
+ pCmd->pvReqLocked = NULL;
+ }
+
+ pCmd->enmCmdType = UINT8_MAX; /* poison */
+
+ /* Update heap budget. Need the critsect to do this safely. */
+ Assert(pCmd->cbHeapCost != 0);
+ uintptr_t idx = pCmd->idxHeapAcc;
+ AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
+
+ int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
+ PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
+
+ Log5Func(("aHgcmAcc[%zu] %#RX64 += %#x (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->cbHeapCost, pCmd));
+ pThisCC->aHgcmAcc[idx].cbHeapBudget += pCmd->cbHeapCost;
+ AssertMsg(pThisCC->aHgcmAcc[idx].cbHeapBudget <= pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
+ ("idx=%d (%d) fRequestor=%#x pCmd=%p: %#RX64 vs %#RX64 -> %#RX64\n", idx, pCmd->idxHeapAcc, pCmd->fRequestor, pCmd,
+ pThisCC->aHgcmAcc[idx].cbHeapBudget, pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
+ pThisCC->aHgcmAcc[idx].cbHeapBudget - pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig));
+ pCmd->cbHeapCost = 0;
+
+#if 1
+ if (pCmd->fMemCache)
+ {
+ RTMemCacheFree(pThisCC->hHgcmCmdCache, pCmd);
+            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect); /* Released afterwards just to be on the safe side. */
+ }
+ else
+#endif
+ {
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ RTMemFree(pCmd);
+ }
+ }
+}
+
+/** Add VBOXHGCMCMD to the list of pending commands.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pCmd Command to add.
+ */
+static int vmmdevR3HgcmAddCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
+{
+ int rc = vmmdevR3HgcmCmdListLock(pThisCC);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("%p type %d\n", pCmd, pCmd->enmCmdType));
+
+ RTListPrepend(&pThisCC->listHGCMCmd, &pCmd->node);
+
+ /* stats */
+ uintptr_t idx = pCmd->idxHeapAcc;
+ AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
+ STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->aHgcmAcc[idx].StateMsgHeapUsage, pCmd->cbHeapCost);
+
+ /* Automatically enable HGCM events, if there are HGCM commands. */
+ if ( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ LogFunc(("u32HGCMEnabled = %d\n", pThisCC->u32HGCMEnabled));
+ if (ASMAtomicCmpXchgU32(&pThisCC->u32HGCMEnabled, 1, 0))
+ VMMDevCtlSetGuestFilterMask(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM, 0);
+ }
+
+ vmmdevR3HgcmCmdListUnlock(pThisCC);
+ return rc;
+}
+
+/** Remove VBOXHGCMCMD from the list of pending commands.
+ *
+ * @returns VBox status code.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pCmd Command to remove.
+ */
+static int vmmdevR3HgcmRemoveCommand(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
+{
+ int rc = vmmdevR3HgcmCmdListLock(pThisCC);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("%p\n", pCmd));
+
+ RTListNodeRemove(&pCmd->node);
+
+ vmmdevR3HgcmCmdListUnlock(pThisCC);
+ return rc;
+}
+
+/**
+ * Find a HGCM command by its physical address.
+ *
+ * The caller is responsible for taking the command list lock before calling
+ * this function.
+ *
+ * @returns Pointer to the command on success, NULL otherwise.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param GCPhys The physical address of the command we're looking for.
+ */
+DECLINLINE(PVBOXHGCMCMD) vmmdevR3HgcmFindCommandLocked(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
+{
+ PVBOXHGCMCMD pCmd;
+ RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ if (pCmd->GCPhys == GCPhys)
+ return pCmd;
+ }
+ return NULL;
+}
+
+/** Copy VMMDevHGCMConnect request data from the guest to VBOXHGCMCMD command.
+ *
+ * @param pHGCMConnect The source guest request (cached in host memory).
+ * @param pCmd Destination command.
+ */
+static void vmmdevR3HgcmConnectFetch(const VMMDevHGCMConnect *pHGCMConnect, PVBOXHGCMCMD pCmd)
+{
+ pCmd->enmRequestType = pHGCMConnect->header.header.requestType;
+ pCmd->u.connect.u32ClientID = pHGCMConnect->u32ClientID;
+ *pCmd->u.connect.pLoc = pHGCMConnect->loc;
+}
+
+/** Handle VMMDevHGCMConnect request.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pHGCMConnect The guest request (cached in host memory).
+ * @param GCPhys The physical address of the request.
+ */
+int vmmdevR3HgcmConnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys)
+{
+ int rc;
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, GCPhys, pHGCMConnect->header.header.size, 0,
+ pHGCMConnect->header.header.fRequestor);
+ if (pCmd)
+ {
+ vmmdevR3HgcmConnectFetch(pHGCMConnect, pCmd);
+
+ /* Only allow the guest to use existing services! */
+ ASSERT_GUEST(pHGCMConnect->loc.type == VMMDevHGCMLoc_LocalHost_Existing);
+ pCmd->u.connect.pLoc->type = VMMDevHGCMLoc_LocalHost_Existing;
+
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ rc = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc, &pCmd->u.connect.u32ClientID);
+ if (RT_FAILURE(rc))
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+/** Copy VMMDevHGCMDisconnect request data from the guest to VBOXHGCMCMD command.
+ *
+ * @param pHGCMDisconnect The source guest request (cached in host memory).
+ * @param pCmd Destination command.
+ */
+static void vmmdevR3HgcmDisconnectFetch(const VMMDevHGCMDisconnect *pHGCMDisconnect, PVBOXHGCMCMD pCmd)
+{
+ pCmd->enmRequestType = pHGCMDisconnect->header.header.requestType;
+ pCmd->u.disconnect.u32ClientID = pHGCMDisconnect->u32ClientID;
+}
+
+/** Handle VMMDevHGCMDisconnect request.
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pHGCMDisconnect The guest request (cached in host memory).
+ * @param GCPhys The physical address of the request.
+ */
+int vmmdevR3HgcmDisconnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys)
+{
+ int rc;
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, GCPhys, pHGCMDisconnect->header.header.size, 0,
+ pHGCMDisconnect->header.header.fRequestor);
+ if (pCmd)
+ {
+ vmmdevR3HgcmDisconnectFetch(pHGCMDisconnect, pCmd);
+
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ rc = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
+ if (RT_FAILURE(rc))
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+/** Translate LinAddr parameter type to the direction of data transfer.
+ *
+ * @returns VBOX_HGCM_F_PARM_DIRECTION_* flags.
+ * @param enmType Type of the LinAddr parameter.
+ */
+static uint32_t vmmdevR3HgcmParmTypeToDirection(HGCMFunctionParameterType enmType)
+{
+ if (enmType == VMMDevHGCMParmType_LinAddr_In) return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ if (enmType == VMMDevHGCMParmType_LinAddr_Out) return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+}
+
+/** Check if list of pages in a HGCM pointer parameter corresponds to a contiguous buffer.
+ *
+ * @returns true if pages are contiguous, false otherwise.
+ * @param pPtr Information about a pointer HGCM parameter.
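+ *
+ * @note    For example, assuming 4 KiB guest pages, the page list 0x10000, 0x11000,
+ *          0x12000 describes a contiguous buffer, whereas 0x10000, 0x12000 does not.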
+ */
+DECLINLINE(bool) vmmdevR3HgcmGuestBufferIsContiguous(const VBOXHGCMPARMPTR *pPtr)
+{
+ if (pPtr->cPages == 1)
+ return true;
+ RTGCPHYS64 Phys = pPtr->paPages[0] + GUEST_PAGE_SIZE;
+ if (Phys != pPtr->paPages[1])
+ return false;
+ if (pPtr->cPages > 2)
+ {
+ uint32_t iPage = 2;
+ do
+ {
+ Phys += GUEST_PAGE_SIZE;
+ if (Phys != pPtr->paPages[iPage])
+ return false;
+ ++iPage;
+ } while (iPage < pPtr->cPages);
+ }
+ return true;
+}
+
+/** Copy data from guest memory to the host buffer.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance for PDMDevHlp.
+ * @param pvDst The destination host buffer.
+ * @param cbDst Size of the destination host buffer.
+ * @param pPtr Description of the source HGCM pointer parameter.
+ */
+static int vmmdevR3HgcmGuestBufferRead(PPDMDEVINSR3 pDevIns, void *pvDst, uint32_t cbDst, const VBOXHGCMPARMPTR *pPtr)
+{
+ /*
+     * Try to detect contiguous buffers.
+ */
+ /** @todo We need a flag for indicating this. */
+ if (vmmdevR3HgcmGuestBufferIsContiguous(pPtr))
+ return PDMDevHlpPhysRead(pDevIns, pPtr->paPages[0] | pPtr->offFirstPage, pvDst, cbDst);
+
+ /*
+ * Page by page fallback.
+ */
+ uint8_t *pu8Dst = (uint8_t *)pvDst;
+ uint32_t offPage = pPtr->offFirstPage;
+ uint32_t cbRemaining = cbDst;
+
+ for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
+ {
+ uint32_t cbToRead = GUEST_PAGE_SIZE - offPage;
+ if (cbToRead > cbRemaining)
+ cbToRead = cbRemaining;
+
+ /* Skip invalid pages. */
+ const RTGCPHYS GCPhys = pPtr->paPages[iPage];
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ int rc = PDMDevHlpPhysRead(pDevIns, GCPhys + offPage, pu8Dst, cbToRead);
+ AssertMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp offPage=%#x cbToRead=%#x\n", rc, GCPhys, offPage, cbToRead), rc);
+ }
+
+        offPage = 0; /* Subsequent pages are read from offset 0. */
+ cbRemaining -= cbToRead;
+ pu8Dst += cbToRead;
+ }
+
+ return VINF_SUCCESS;
+}
+
+/** Copy data from the host buffer to guest memory.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance for PDMDevHlp.
+ * @param pPtr Description of the destination HGCM pointer parameter.
+ * @param pvSrc The source host buffer.
+ * @param cbSrc Size of the source host buffer.
+ */
+static int vmmdevR3HgcmGuestBufferWrite(PPDMDEVINSR3 pDevIns, const VBOXHGCMPARMPTR *pPtr, const void *pvSrc, uint32_t cbSrc)
+{
+ int rc = VINF_SUCCESS;
+
+ uint8_t *pu8Src = (uint8_t *)pvSrc;
+ uint32_t offPage = pPtr->offFirstPage;
+ uint32_t cbRemaining = RT_MIN(cbSrc, pPtr->cbData);
+
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
+ {
+ uint32_t cbToWrite = GUEST_PAGE_SIZE - offPage;
+ if (cbToWrite > cbRemaining)
+ cbToWrite = cbRemaining;
+
+ /* Skip invalid pages. */
+ const RTGCPHYS GCPhys = pPtr->paPages[iPage];
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ rc = PDMDevHlpPhysWrite(pDevIns, GCPhys + offPage, pu8Src, cbToWrite);
+ AssertRCBreak(rc);
+ }
+
+        offPage = 0; /* Subsequent pages are written from offset 0. */
+ cbRemaining -= cbToWrite;
+ pu8Src += cbToWrite;
+ }
+
+ return rc;
+}
+
+/** Initializes pCmd->paHostParms from already initialized pCmd->paGuestParms.
+ * Allocates memory for pointer parameters and copies data from the guest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pCmd Command structure where host parameters needs initialization.
+ * @param pbReq The request buffer.
+ */
+static int vmmdevR3HgcmInitHostParameters(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, uint8_t const *pbReq)
+{
+ AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
+
+ for (uint32_t i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+
+ switch (pGuestParm->enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ {
+ pHostParm->type = VBOX_HGCM_SVC_PARM_32BIT;
+ pHostParm->u.uint32 = (uint32_t)pGuestParm->u.val.u64Value;
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_64bit:
+ {
+ pHostParm->type = VBOX_HGCM_SVC_PARM_64BIT;
+ pHostParm->u.uint64 = pGuestParm->u.val.u64Value;
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_PageList:
+ case VMMDevHGCMParmType_LinAddr_In:
+ case VMMDevHGCMParmType_LinAddr_Out:
+ case VMMDevHGCMParmType_LinAddr:
+ case VMMDevHGCMParmType_Embedded:
+ case VMMDevHGCMParmType_ContiguousPageList:
+ {
+ const uint32_t cbData = pGuestParm->u.ptr.cbData;
+
+ pHostParm->type = VBOX_HGCM_SVC_PARM_PTR;
+ pHostParm->u.pointer.size = cbData;
+
+ if (cbData)
+ {
+                    /* Zero the memory; the buffer contents may be copied back to the guest. */
+ void *pv = vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd, cbData);
+ AssertReturn(pv, VERR_NO_MEMORY);
+ pHostParm->u.pointer.addr = pv;
+
+ if (pGuestParm->u.ptr.fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
+ {
+ if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
+ {
+ if (pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList)
+ {
+ int rc = vmmdevR3HgcmGuestBufferRead(pDevIns, pv, cbData, &pGuestParm->u.ptr);
+ ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ else
+ {
+ int rc = PDMDevHlpPhysRead(pDevIns,
+ pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
+ pv, cbData);
+ ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ }
+ else
+ {
+ memcpy(pv, &pbReq[pGuestParm->u.ptr.offFirstPage], cbData);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ }
+ }
+ else
+ {
+ pHostParm->u.pointer.addr = NULL;
+ }
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_NoBouncePageList:
+ {
+ pHostParm->type = VBOX_HGCM_SVC_PARM_PAGES;
+ pHostParm->u.Pages.cb = pGuestParm->u.Pages.cbData;
+ pHostParm->u.Pages.cPages = pGuestParm->u.Pages.cPages;
+ pHostParm->u.Pages.papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[pGuestParm->u.Pages.cPages];
+
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/** Allocate and initialize VBOXHGCMCMD structure for a HGCMCall request.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pHGCMCall The HGCMCall request (cached in host memory).
+ * @param cbHGCMCall Size of the request.
+ * @param GCPhys Guest physical address of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param ppCmd Where to store pointer to allocated command.
+ * @param pcbHGCMParmStruct Where to store size of used HGCM parameter structure.
+ */
+static int vmmdevR3HgcmCallAlloc(PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
+ VMMDevRequestType enmRequestType, PVBOXHGCMCMD *ppCmd, uint32_t *pcbHGCMParmStruct)
+{
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ const uint32_t cbHGCMParmStruct = enmRequestType == VMMDevReq_HGCMCall64 ? sizeof(HGCMFunctionParameter64)
+ : sizeof(HGCMFunctionParameter32);
+#else
+ const uint32_t cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
+#endif
+
+ const uint32_t cParms = pHGCMCall->cParms;
+
+ /* Whether there is enough space for parameters and sane upper limit. */
+ ASSERT_GUEST_STMT_RETURN( cParms <= (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct
+ && cParms <= VMMDEV_MAX_HGCM_PARMS,
+ LogRelMax(50, ("VMMDev: request packet with invalid number of HGCM parameters: %d vs %d. Refusing operation.\n",
+ (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct, cParms)),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CALL, GCPhys, cbHGCMCall, cParms,
+ pHGCMCall->header.header.fRequestor);
+ if (pCmd == NULL)
+ return VERR_NO_MEMORY;
+
+ /* Request type has been validated in vmmdevReqDispatcher. */
+ pCmd->enmRequestType = enmRequestType;
+ pCmd->u.call.u32ClientID = pHGCMCall->u32ClientID;
+ pCmd->u.call.u32Function = pHGCMCall->u32Function;
+
+ *ppCmd = pCmd;
+ *pcbHGCMParmStruct = cbHGCMParmStruct;
+ return VINF_SUCCESS;
+}
+
+/**
+ * Heap budget wrapper around RTMemAlloc and RTMemAllocZ.
+ */
+static void *vmmdevR3HgcmCallMemAllocEx(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested, bool fZero)
+{
+ uintptr_t idx = pCmd->idxHeapAcc;
+ AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
+
+ /* Check against max heap costs for this request. */
+ Assert(pCmd->cbHeapCost <= VMMDEV_MAX_HGCM_DATA_SIZE);
+ if (cbRequested <= VMMDEV_MAX_HGCM_DATA_SIZE - pCmd->cbHeapCost)
+ {
+ /* Check heap budget (we're under lock). */
+ if (cbRequested <= pThisCC->aHgcmAcc[idx].cbHeapBudget)
+ {
+ /* Do the actual allocation. */
+ void *pv = fZero ? RTMemAllocZ(cbRequested) : RTMemAlloc(cbRequested);
+ if (pv)
+ {
+ /* Update the request cost and heap budget. */
+ Log5Func(("aHgcmAcc[%zu] %#RX64 += %#x (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, cbRequested, pCmd));
+ pThisCC->aHgcmAcc[idx].cbHeapBudget -= cbRequested;
+ pCmd->cbHeapCost += (uint32_t)cbRequested;
+ return pv;
+ }
+ LogFunc(("Heap alloc failed: cbRequested=%#zx - enmCmdType=%d\n", cbRequested, pCmd->enmCmdType));
+ }
+ else
+ LogFunc(("Heap budget overrun: cbRequested=%#zx cbHeapCost=%#x aHgcmAcc[%u].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
+ cbRequested, pCmd->cbHeapCost, pCmd->idxHeapAcc, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->enmCmdType));
+ }
+ else
+ LogFunc(("Request too big: cbRequested=%#zx cbHeapCost=%#x - enmCmdType=%d\n",
+ cbRequested, pCmd->cbHeapCost, pCmd->enmCmdType));
+ STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idx].StatBudgetOverruns);
+ return NULL;
+}
+
+/**
+ * Heap budget wrapper around RTMemAlloc.
+ */
+DECLINLINE(void *) vmmdevR3HgcmCallMemAlloc(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
+{
+ return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, false /*fZero*/);
+}
+
+/**
+ * Heap budget wrapper around RTMemAllocZ.
+ */
+DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
+{
+ return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, true /*fZero*/);
+}
+
+/** Copy VMMDevHGCMCall request data from the guest to VBOXHGCMCMD command.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pCmd The destination command.
+ * @param pHGCMCall The HGCMCall request (cached in host memory).
+ * @param cbHGCMCall Size of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param cbHGCMParmStruct Size of used HGCM parameter structure.
+ */
+static int vmmdevR3HgcmCallFetchGuestParms(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd,
+ const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
+ VMMDevRequestType enmRequestType, uint32_t cbHGCMParmStruct)
+{
+ /*
+ * Go over all guest parameters and initialize relevant VBOXHGCMCMD fields.
+     * VBOXHGCMCMD must contain all information about the request, since
+     * the request will not be read from guest memory again.
+ */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ const bool f64Bits = (enmRequestType == VMMDevReq_HGCMCall64);
+#endif
+
+ const uint32_t cParms = pCmd->u.call.cParms;
+
+ /* Offsets in the request buffer to HGCM parameters and additional data. */
+ const uint32_t offHGCMParms = sizeof(VMMDevHGCMCall);
+ const uint32_t offExtra = offHGCMParms + cParms * cbHGCMParmStruct;
+
+ /* Pointer to the next HGCM parameter of the request. */
+ const uint8_t *pu8HGCMParm = (uint8_t *)pHGCMCall + offHGCMParms;
+
+ for (uint32_t i = 0; i < cParms; ++i, pu8HGCMParm += cbHGCMParmStruct)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, type, HGCMFunctionParameter32, type);
+ pGuestParm->enmType = ((HGCMFunctionParameter64 *)pu8HGCMParm)->type;
+#else
+ pGuestParm->enmType = ((HGCMFunctionParameter *)pu8HGCMParm)->type;
+#endif
+
+ switch (pGuestParm->enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value32, HGCMFunctionParameter32, u.value32);
+ uint32_t *pu32 = &((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value32;
+#else
+ uint32_t *pu32 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value32;
+#endif
+ LogFunc(("uint32 guest parameter %RI32\n", *pu32));
+
+ pGuestParm->u.val.u64Value = *pu32;
+ pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu32 - (uintptr_t)pHGCMCall);
+ pGuestParm->u.val.cbValue = sizeof(uint32_t);
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_64bit:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value64, HGCMFunctionParameter32, u.value64);
+                uint64_t *pu64 = (uint64_t *)(uintptr_t)&((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value64; /* MSC detects misalignment, hence the casts. */
+#else
+ uint64_t *pu64 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value64;
+#endif
+ LogFunc(("uint64 guest parameter %RI64\n", *pu64));
+
+ pGuestParm->u.val.u64Value = *pu64;
+ pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu64 - (uintptr_t)pHGCMCall);
+ pGuestParm->u.val.cbValue = sizeof(uint64_t);
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_LinAddr_In: /* In (read) */
+ case VMMDevHGCMParmType_LinAddr_Out: /* Out (write) */
+ case VMMDevHGCMParmType_LinAddr: /* In & Out */
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ uint32_t cbData = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.size
+ : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.size;
+ RTGCPTR GCPtr = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.u.linearAddr
+ : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.u.linearAddr;
+#else
+ uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.size;
+ RTGCPTR GCPtr = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.u.linearAddr;
+#endif
+ LogFunc(("LinAddr guest parameter %RGv, cb %u\n", GCPtr, cbData));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
+
+ const uint32_t offFirstPage = cbData > 0 ? GCPtr & GUEST_PAGE_OFFSET_MASK : 0;
+ const uint32_t cPages = cbData > 0 ? (offFirstPage + cbData + GUEST_PAGE_SIZE - 1) / GUEST_PAGE_SIZE : 0;
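+                /* Example (assuming 4 KiB guest pages): a 3 byte buffer at an address ending in 0xFFE gives offFirstPage=0xFFE and cPages=2. */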
+
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = offFirstPage;
+ pGuestParm->u.ptr.cPages = cPages;
+ pGuestParm->u.ptr.fu32Direction = vmmdevR3HgcmParmTypeToDirection(pGuestParm->enmType);
+
+ if (cbData > 0)
+ {
+ if (cPages == 1)
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
+ else
+ {
+ /* (Max 262144 bytes with current limits.) */
+ pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
+ cPages * sizeof(RTGCPHYS));
+ AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
+ }
+
+                /* Convert the guest linear page pointers to physical addresses. */
+ GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
+ for (uint32_t iPage = 0; iPage < cPages; ++iPage)
+ {
+                    /* The guest might specify an invalid GCPtr; just skip such addresses.
+                     * Also, if the guest parameters are fetched while restoring an old saved state,
+                     * GCPtr may have become invalid and no longer have a corresponding GCPhys.
+                     * The command restoration routine will take care of this.
+                     */
+ RTGCPHYS GCPhys;
+ int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pDevIns, GCPtr, &GCPhys);
+ if (RT_FAILURE(rc2))
+ GCPhys = NIL_RTGCPHYS;
+ LogFunc(("Page %d: %RGv -> %RGp. %Rrc\n", iPage, GCPtr, GCPhys, rc2));
+
+ pGuestParm->u.ptr.paPages[iPage] = GCPhys;
+ GCPtr += GUEST_PAGE_SIZE;
+ }
+ }
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_PageList:
+ case VMMDevHGCMParmType_ContiguousPageList:
+ case VMMDevHGCMParmType_NoBouncePageList:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.offset, HGCMFunctionParameter32, u.PageList.offset);
+ uint32_t cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.size;
+ uint32_t offPageListInfo = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.offset;
+#else
+ uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.size;
+ uint32_t offPageListInfo = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.offset;
+#endif
+ LogFunc(("PageList guest parameter cb %u, offset %u\n", cbData, offPageListInfo));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
+
+/** @todo respect zero byte page lists... */
+ /* Check that the page list info is within the request. */
+ ASSERT_GUEST_RETURN( offPageListInfo >= offExtra
+ && cbHGCMCall >= sizeof(HGCMPageListInfo)
+ && offPageListInfo <= cbHGCMCall - sizeof(HGCMPageListInfo),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /* The HGCMPageListInfo structure is within the request. */
+ const HGCMPageListInfo *pPageListInfo = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offPageListInfo);
+
+ /* Enough space for page pointers? */
+ const uint32_t cMaxPages = 1 + (cbHGCMCall - offPageListInfo - sizeof(HGCMPageListInfo)) / sizeof(RTGCPHYS);
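+                /* The '1 +' is because sizeof(HGCMPageListInfo) presumably already includes room for one aPages[] entry. */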
+ ASSERT_GUEST_RETURN( pPageListInfo->cPages > 0
+ && pPageListInfo->cPages <= cMaxPages,
+ VERR_INVALID_PARAMETER);
+
+ /* Flags. */
+ ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(pPageListInfo->flags),
+ ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
+ /* First page offset. */
+ ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < GUEST_PAGE_SIZE,
+ ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
+
+ /* Contiguous page lists only ever have a single page and
+ no-bounce page list requires cPages to match the size exactly.
+ Plain page list does not impose any restrictions on cPages currently. */
+ ASSERT_GUEST_MSG_RETURN( pPageListInfo->cPages
+ == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
+ : RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, GUEST_PAGE_SIZE)
+ >> GUEST_PAGE_SHIFT)
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
+ ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
+ pPageListInfo->offFirstPage, cbData, pPageListInfo->cPages, pGuestParm->enmType),
+ VERR_INVALID_PARAMETER);
+
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Deal with no-bounce buffers first, as
+ * VMMDevHGCMParmType_PageList is the fallback.
+ */
+ if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
+ {
+ /* Validate page offsets */
+ ASSERT_GUEST_MSG_RETURN( !(pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK)
+ || (pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
+ ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
+ VERR_INVALID_POINTER);
+ uint32_t const cPages = pPageListInfo->cPages;
+ for (uint32_t iPage = 1; iPage < cPages; iPage++)
+ ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & GUEST_PAGE_OFFSET_MASK),
+ ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ pGuestParm->u.Pages.cbData = cbData;
+ pGuestParm->u.Pages.offFirstPage = pPageListInfo->offFirstPage;
+ pGuestParm->u.Pages.fFlags = pPageListInfo->flags;
+ pGuestParm->u.Pages.cPages = (uint16_t)cPages;
+ pGuestParm->u.Pages.fLocked = false;
+ pGuestParm->u.Pages.paPgLocks = (PPGMPAGEMAPLOCK)vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd,
+ ( sizeof(PGMPAGEMAPLOCK)
+ + sizeof(void *)) * cPages);
+ AssertReturn(pGuestParm->u.Pages.paPgLocks, VERR_NO_MEMORY);
+
+                    /* Map and lock the guest pages so the host service can access the data in place (no bouncing). */
+ int rc = VINF_SUCCESS;
+ void **papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[cPages];
+ if (pPageListInfo->flags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST)
+ rc = PDMDevHlpPhysBulkGCPhys2CCPtr(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
+ papvPages, pGuestParm->u.Pages.paPgLocks);
+ else
+ rc = PDMDevHlpPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
+ (void const **)papvPages, pGuestParm->u.Pages.paPgLocks);
+ if (RT_SUCCESS(rc))
+ {
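+                        /* Fold the byte offset into the first mapping pointer so the host service sees the exact start of the data. */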
+ papvPages[0] = (void *)((uintptr_t)papvPages[0] | pPageListInfo->offFirstPage);
+ pGuestParm->u.Pages.fLocked = true;
+ break;
+ }
+
+ /* Locking failed, bail out. In case of MMIO we fall back on regular page list handling. */
+ RTMemFree(pGuestParm->u.Pages.paPgLocks);
+ pGuestParm->u.Pages.paPgLocks = NULL;
+ STAM_REL_COUNTER_INC(&pThisCC->StatHgcmFailedPageListLocking);
+ ASSERT_GUEST_MSG_RETURN(rc == VERR_PGM_PHYS_PAGE_RESERVED, ("cPages=%u %Rrc\n", cPages, rc), rc);
+ pGuestParm->enmType = VMMDevHGCMParmType_PageList;
+ }
+
+ /*
+ * Regular page list or contiguous page list.
+ */
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = pPageListInfo->offFirstPage;
+ pGuestParm->u.ptr.cPages = pPageListInfo->cPages;
+ pGuestParm->u.ptr.fu32Direction = pPageListInfo->flags;
+ if (pPageListInfo->cPages == 1)
+ {
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
+ pGuestParm->u.ptr.GCPhysSinglePage = pPageListInfo->aPages[0];
+ }
+ else
+ {
+ pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
+ pPageListInfo->cPages * sizeof(RTGCPHYS));
+ AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
+
+ for (uint32_t iPage = 0; iPage < pGuestParm->u.ptr.cPages; ++iPage)
+ pGuestParm->u.ptr.paPages[iPage] = pPageListInfo->aPages[iPage];
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_Embedded:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
+ uint32_t const cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.cbData;
+ uint32_t const offData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.offData;
+ uint32_t const fFlags = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.fFlags;
+#else
+ uint32_t const cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.cbData;
+ uint32_t const offData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.offData;
+ uint32_t const fFlags = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.fFlags;
+#endif
+ LogFunc(("Embedded guest parameter cb %u, offset %u, flags %#x\n", cbData, offData, fFlags));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
+
+ /* Check flags and buffer range. */
+ ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(fFlags), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
+ ASSERT_GUEST_MSG_RETURN( offData >= offExtra
+ && offData <= cbHGCMCall
+ && cbData <= cbHGCMCall - offData,
+ ("offData=%#x cbData=%#x cbHGCMCall=%#x offExtra=%#x\n", offData, cbData, cbHGCMCall, offExtra),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /* We use part of the ptr member. */
+ pGuestParm->u.ptr.fu32Direction = fFlags;
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = offData;
+ pGuestParm->u.ptr.GCPhysSinglePage = pCmd->GCPhys + offData;
+ pGuestParm->u.ptr.cPages = 1;
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
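+                /* Embedded data lives inside the request buffer itself; results are copied back through the request on completion. */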
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Handles VMMDevHGCMCall request.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pHGCMCall The request to handle (cached in host memory).
+ * @param cbHGCMCall Size of the entire request (including HGCM parameters).
+ * @param GCPhys The guest physical address of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+int vmmdevR3HgcmCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
+ RTGCPHYS GCPhys, VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
+{
+ LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d, fRequestor = %#x\n", pHGCMCall->u32ClientID,
+ pHGCMCall->u32Function, pHGCMCall->cParms, enmRequestType, pHGCMCall->header.header.fRequestor));
+
+ /*
+ * Validation.
+ */
+ ASSERT_GUEST_RETURN(cbHGCMCall >= sizeof(VMMDevHGCMCall), VERR_INVALID_PARAMETER);
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ ASSERT_GUEST_RETURN( enmRequestType == VMMDevReq_HGCMCall32
+ || enmRequestType == VMMDevReq_HGCMCall64, VERR_INVALID_PARAMETER);
+#else
+ ASSERT_GUEST_RETURN(enmRequestType == VMMDevReq_HGCMCall32, VERR_INVALID_PARAMETER);
+#endif
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Create a command structure.
+ */
+ PVBOXHGCMCMD pCmd;
+ uint32_t cbHGCMParmStruct;
+ int rc = vmmdevR3HgcmCallAlloc(pThisCC, pHGCMCall, cbHGCMCall, GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ pCmd->tsArrival = tsArrival;
+ PVMMDEVREQLOCK pLock = *ppLock;
+ if (pLock)
+ {
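+            /* Take over the request mapping lock so completion can update the request directly in guest memory. */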
+ pCmd->ReqMapLock = pLock->Lock;
+ pCmd->pvReqLocked = pLock->pvReq;
+ *ppLock = NULL;
+ }
+
+ rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ /* Copy guest data to host parameters, so HGCM services can use the data. */
+ rc = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pHGCMCall);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Pass the function call to HGCM connector for actual processing
+ */
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+
+#if 0 /* DONT ENABLE - for performance hacking. */
+ if ( pCmd->u.call.u32Function == 9
+ && pCmd->u.call.cParms == 5)
+ {
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+
+ if (pCmd->pvReqLocked)
+ {
+ VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
+ pHeader->header.rc = VINF_SUCCESS;
+ pHeader->result = VINF_SUCCESS;
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+ }
+ else
+ {
+ VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)pHGCMCall;
+ pHeader->header.rc = VINF_SUCCESS;
+ pHeader->result = VINF_SUCCESS;
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+ PDMDevHlpPhysWrite(pDevIns, GCPhys, pHeader, sizeof(*pHeader));
+ }
+ vmmdevR3HgcmCmdFree(pDevIns, pThisCC, pCmd);
+ return VINF_HGCM_ASYNC_EXECUTE; /* ignored, but avoids assertions. */
+ }
+#endif
+
+ rc = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
+ pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
+ pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsArrival);
+
+ if (rc == VINF_HGCM_ASYNC_EXECUTE)
+ {
+ /*
+ * Done. Just update statistics and return.
+ */
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdArrival, tsNow - tsArrival);
+#endif
+ return rc;
+ }
+
+ /*
+ * Failed, bail out.
+ */
+ LogFunc(("pfnCall rc = %Rrc\n", rc));
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ }
+ }
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+ return rc;
+}
+
+/**
+ * VMMDevReq_HGCMCancel worker.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pHGCMCancel The request to handle (cached in host memory).
+ * @param GCPhys The address of the request.
+ *
+ * @thread EMT
+ */
+int vmmdevR3HgcmCancel(PVMMDEVCC pThisCC, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys)
+{
+ NOREF(pHGCMCancel);
+ int rc = vmmdevR3HgcmCancel2(pThisCC, GCPhys);
+ return rc == VERR_NOT_FOUND ? VERR_INVALID_PARAMETER : rc;
+}
+
+/**
+ * VMMDevReq_HGCMCancel2 worker.
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_NOT_FOUND if the request was not found.
+ * @retval VERR_INVALID_PARAMETER if the request address is invalid.
+ *
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param GCPhys The address of the request that should be cancelled.
+ *
+ * @thread EMT
+ */
+int vmmdevR3HgcmCancel2(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
+{
+ if ( GCPhys == 0
+ || GCPhys == NIL_RTGCPHYS
+ || GCPhys == NIL_RTGCPHYS32)
+ {
+ Log(("vmmdevR3HgcmCancel2: GCPhys=%#x\n", GCPhys));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Locate the command and cancel it while under the protection of
+ * the lock. hgcmCompletedWorker makes assumptions about this.
+ */
+ int rc = vmmdevR3HgcmCmdListLock(pThisCC);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmFindCommandLocked(pThisCC, GCPhys);
+ if (pCmd)
+ {
+ pCmd->fCancelled = true;
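+        /* The command stays in the list; hgcmCompletedWorker checks fCancelled and will not write results back to guest memory. */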
+
+ Log(("vmmdevR3HgcmCancel2: Cancelled pCmd=%p / GCPhys=%#x\n", pCmd, GCPhys));
+ if (pThisCC->pHGCMDrv)
+ pThisCC->pHGCMDrv->pfnCancelled(pThisCC->pHGCMDrv, pCmd,
+ pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.u32ClientID
+ : pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? pCmd->u.connect.u32ClientID
+ : pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT ? pCmd->u.disconnect.u32ClientID
+ : 0);
+ }
+ else
+ rc = VERR_NOT_FOUND;
+
+ vmmdevR3HgcmCmdListUnlock(pThisCC);
+ return rc;
+}
+
+/** Write HGCM call parameters and buffers back to the guest request and memory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pCmd Completed call command.
+ * @param pHGCMCall The guest request which needs updating (cached in host memory).
+ * @param pbReq The request copy or locked memory for handling
+ * embedded buffers.
+ */
+static int vmmdevR3HgcmCompleteCallRequest(PPDMDEVINS pDevIns, PVBOXHGCMCMD pCmd, VMMDevHGCMCall *pHGCMCall, uint8_t *pbReq)
+{
+ AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
+
+ /*
+ * Go over parameter descriptions saved in pCmd.
+ */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ HGCMFunctionParameter64 *pReqParm = (HGCMFunctionParameter64 *)(pbReq + sizeof(VMMDevHGCMCall));
+ size_t const cbHGCMParmStruct = pCmd->enmRequestType == VMMDevReq_HGCMCall64
+ ? sizeof(HGCMFunctionParameter64) : sizeof(HGCMFunctionParameter32);
+#else
+ HGCMFunctionParameter *pReqParm = (HGCMFunctionParameter *)(pbReq + sizeof(VMMDevHGCMCall));
+ size_t const cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
+#endif
+ for (uint32_t i = 0;
+ i < pCmd->u.call.cParms;
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ ++i, pReqParm = (HGCMFunctionParameter64 *)((uint8_t *)pReqParm + cbHGCMParmStruct)
+#else
+ ++i, pReqParm = (HGCMFunctionParameter *)((uint8_t *)pReqParm + cbHGCMParmStruct)
+#endif
+ )
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+
+ const HGCMFunctionParameterType enmType = pGuestParm->enmType;
+ switch (enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ case VMMDevHGCMParmType_64bit:
+ {
+ const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ const void *pvSrc = enmType == VMMDevHGCMParmType_32bit ? (void *)&pHostParm->u.uint32
+ : (void *)&pHostParm->u.uint64;
+/** @todo optimize memcpy away here. */
+ memcpy((uint8_t *)pHGCMCall + pVal->offValue, pvSrc, pVal->cbValue);
+ break;
+ }
+
+ case VMMDevHGCMParmType_LinAddr_In:
+ case VMMDevHGCMParmType_LinAddr_Out:
+ case VMMDevHGCMParmType_LinAddr:
+ case VMMDevHGCMParmType_PageList:
+ {
+/** @todo Update the return buffer size? */
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ int rc = vmmdevR3HgcmGuestBufferWrite(pDevIns, pPtr, pvSrc, cbSrc);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_Embedded:
+ {
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+
+ /* Update size. */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
+#endif
+ pReqParm->u.Embedded.cbData = pHostParm->u.pointer.size;
+
+ /* Copy out data. */
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
+ memcpy(pbReq + pPtr->offFirstPage, pvSrc, cbToCopy);
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_ContiguousPageList:
+ {
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+
+ /* Update size. */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
+#endif
+ pReqParm->u.PageList.size = pHostParm->u.pointer.size;
+
+ /* Copy out data. */
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
+ int rc = PDMDevHlpPhysWrite(pDevIns, pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
+ pvSrc, cbToCopy);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_NoBouncePageList:
+ {
+ /* Update size. */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
+#endif
+ pReqParm->u.PageList.size = pHostParm->u.Pages.cb;
+
+ /* unlock early. */
+ if (pGuestParm->u.Pages.fLocked)
+ {
+ PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
+ pGuestParm->u.Pages.paPgLocks);
+ pGuestParm->u.Pages.fLocked = false;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+/** Update HGCM request in the guest memory and mark it as completed.
+ *
+ * @returns VINF_SUCCESS or VERR_CANCELLED.
+ * @param pInterface Pointer to this PDM interface.
+ * @param result HGCM completion status code (VBox status code).
+ * @param pCmd Completed command, which contains updated host parameters.
+ *
+ * @thread EMT
+ */
+static int hgcmCompletedWorker(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
+ PPDMDEVINS pDevIns = pThisCC->pDevIns;
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+#ifdef VBOX_WITH_DTRACE
+ uint32_t idFunction = 0;
+ uint32_t idClient = 0;
+#endif
+
+ if (result == VINF_HGCM_SAVE_STATE)
+ {
+        /* If the completion routine was called while the HGCM service saves its state,
+         * then there is currently nothing to be done here. The pCmd stays in the list, will
+         * be saved later when the VMMDev state is saved, and will be re-submitted on load.
+         *
+         * It is assumed that VMMDev saves its state after the HGCM services (the VMMDev driver
+         * is attached by the constructor before it registers its SSM state) and, therefore,
+         * the VBOXHGCMCMD structures are not removed from the list by vmmdevR3HgcmSaveState
+         * while HGCM is still using them.
+         */
+ LogFlowFunc(("VINF_HGCM_SAVE_STATE for command %p\n", pCmd));
+ return VINF_SUCCESS;
+ }
+
+ VBOXDD_HGCMCALL_COMPLETED_EMT(pCmd, result);
+
+ int rc = VINF_SUCCESS;
+
+ /*
+ * The cancellation protocol requires us to remove the command here
+ * and then check the flag. Cancelled commands must not be written
+ * back to guest memory.
+ */
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+
+ if (RT_LIKELY(!pCmd->fCancelled))
+ {
+ if (!pCmd->pvReqLocked)
+ {
+ /*
+ * Request is not locked:
+ */
+ VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
+ if (pHeader)
+ {
+ /*
+ * Read the request from the guest memory for updating.
+                 * The request data is not used for anything but checking the request type.
+ */
+ PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+
+ /* Verify the request type. This is the only field which is used from the guest memory. */
+ const VMMDevRequestType enmRequestType = pHeader->header.requestType;
+ if ( enmRequestType == pCmd->enmRequestType
+ || enmRequestType == VMMDevReq_HGCMCancel)
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Update parameters and data buffers.
+ */
+ switch (enmRequestType)
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+#endif
+ case VMMDevReq_HGCMCall32:
+ {
+ VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
+ rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
+#ifdef VBOX_WITH_DTRACE
+ idFunction = pCmd->u.call.u32Function;
+ idClient = pCmd->u.call.u32ClientID;
+#endif
+ break;
+ }
+
+ case VMMDevReq_HGCMConnect:
+ {
+ /* save the client id in the guest request packet */
+ VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
+ pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
+ break;
+ }
+
+ default:
+ /* make compiler happy */
+ break;
+ }
+ }
+ else
+ {
+ /* Guest has changed the command type. */
+ LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
+ pCmd->enmCmdType, pHeader->header.requestType));
+
+ ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
+ }
+
+ /* Setup return code for the guest. */
+ if (RT_SUCCESS(rc))
+ pHeader->result = result;
+ else
+ pHeader->result = rc;
+
+ /* First write back the request. */
+ PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+
+ /* Mark request as processed. */
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+
+ /* Second write the flags to mark the request as processed. */
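+                /* (The flags are written last so the guest never sees VBOX_HGCM_REQ_DONE before the result and data are in place.) */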
+ PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
+ &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
+
+                /* Now that the command has been removed from the internal list, notify the guest. */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
+
+ RTMemFreeZ(pHeader, pCmd->cbRequest);
+ }
+ else
+ {
+ LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
+ }
+ }
+ /*
+ * Request was locked:
+ */
+ else
+ {
+ VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
+
+ /* Verify the request type. This is the only field which is used from the guest memory. */
+ const VMMDevRequestType enmRequestType = pHeader->header.requestType;
+ if ( enmRequestType == pCmd->enmRequestType
+ || enmRequestType == VMMDevReq_HGCMCancel)
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Update parameters and data buffers.
+ */
+ switch (enmRequestType)
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+#endif
+ case VMMDevReq_HGCMCall32:
+ {
+ VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
+ rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
+#ifdef VBOX_WITH_DTRACE
+ idFunction = pCmd->u.call.u32Function;
+ idClient = pCmd->u.call.u32ClientID;
+#endif
+ break;
+ }
+
+ case VMMDevReq_HGCMConnect:
+ {
+ /* save the client id in the guest request packet */
+ VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
+ pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
+ break;
+ }
+
+ default:
+ /* make compiler happy */
+ break;
+ }
+ }
+ else
+ {
+ /* Guest has changed the command type. */
+ LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
+ pCmd->enmCmdType, pHeader->header.requestType));
+
+ ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
+ }
+
+ /* Setup return code for the guest. */
+ if (RT_SUCCESS(rc))
+ pHeader->result = result;
+ else
+ pHeader->result = rc;
+
+ /* Mark request as processed. */
+ ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
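+            /* Atomic update since the guest may be polling fu32Flags in the locked page concurrently. */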
+
+            /* Now that the command has been removed from the internal list, notify the guest. */
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
+ }
+
+ /* Set the status to success for now, though we might consider passing
+ along the vmmdevR3HgcmCompleteCallRequest errors... */
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ LogFlowFunc(("Cancelled command %p\n", pCmd));
+ rc = VERR_CANCELLED;
+ }
+
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ /* Save for final stats. */
+ uint64_t const tsArrival = pCmd->tsArrival;
+ uint64_t const tsComplete = pCmd->tsComplete;
+#endif
+
+    /* Deallocate the command memory. */
+ VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ /* Update stats. */
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdCompletion, tsNow - tsComplete);
+ if (tsArrival != 0)
+ STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdTotal, tsNow - tsArrival);
+#endif
+
+ return rc;
+}
+
+/**
+ * HGCM callback for request completion. Forwards to hgcmCompletedWorker.
+ *
+ * @returns VINF_SUCCESS or VERR_CANCELLED.
+ * @param pInterface Pointer to this PDM interface.
+ * @param result HGCM completion status code (VBox status code).
+ * @param pCmd Completed command, which contains updated host parameters.
+ */
+DECLCALLBACK(int) hgcmR3Completed(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
+{
+#if 0 /* This seems to be significantly slower. Half of MsgTotal time seems to be spent here. */
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
+ STAM_GET_TS(pCmd->tsComplete);
+
+ VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
+
+/** @todo no longer necessary to forward to EMT, but it might be more
+ * efficient...? */
+ /* Not safe to execute asynchronously; forward to EMT */
+ int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pDevIns), VMCPUID_ANY,
+ (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
+ AssertRC(rc);
+ return VINF_SUCCESS; /* cannot tell if canceled or not... */
+#else
+ STAM_GET_TS(pCmd->tsComplete);
+ VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
+ return hgcmCompletedWorker(pInterface, result, pCmd);
+#endif
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdRestored}
+ */
+DECLCALLBACK(bool) hgcmR3IsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ RT_NOREF(pInterface);
+ return pCmd && pCmd->fRestored;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdCancelled}
+ */
+DECLCALLBACK(bool) hgcmR3IsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ RT_NOREF(pInterface);
+ return pCmd && pCmd->fCancelled;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnGetRequestor}
+ */
+DECLCALLBACK(uint32_t) hgcmR3GetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
+ AssertPtrReturn(pCmd, VMMDEV_REQUESTOR_LOWEST);
+ if (pThis->guestInfo2.fFeatures & VBOXGSTINFO2_F_REQUESTOR_INFO)
+ return pCmd->fRequestor;
+ return VMMDEV_REQUESTOR_LEGACY;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnGetVMMDevSessionId}
+ */
+DECLCALLBACK(uint64_t) hgcmR3GetVMMDevSessionId(PPDMIHGCMPORT pInterface)
+{
+ PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
+ return pThis->idSession;
+}
+
+/** Save information about pending HGCM requests from pThisCC->listHGCMCmd.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pSSM SSM handle for SSM functions.
+ *
+ * @thread EMT
+ */
+int vmmdevR3HgcmSaveState(PVMMDEVCC pThisCC, PSSMHANDLE pSSM)
+{
+ PCPDMDEVHLPR3 pHlp = pThisCC->pDevIns->pHlpR3;
+
+ LogFlowFunc(("\n"));
+
+ /* Compute how many commands are pending. */
+ uint32_t cCmds = 0;
+ PVBOXHGCMCMD pCmd;
+ RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ LogFlowFunc(("pCmd %p\n", pCmd));
+ ++cCmds;
+ }
+ LogFlowFunc(("cCmds = %d\n", cCmds));
+
+ /* Save number of commands. */
+ int rc = pHlp->pfnSSMPutU32(pSSM, cCmds);
+ AssertRCReturn(rc, rc);
+
+ if (cCmds > 0)
+ {
+ RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ LogFlowFunc(("Saving %RGp, size %d\n", pCmd->GCPhys, pCmd->cbRequest));
+
+ /** @todo Don't save cancelled requests! It serves no purpose. See restore and
+ * @bugref{4032#c4} for details. */
+ pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmCmdType);
+ pHlp->pfnSSMPutBool (pSSM, pCmd->fCancelled);
+ pHlp->pfnSSMPutGCPhys (pSSM, pCmd->GCPhys);
+ pHlp->pfnSSMPutU32 (pSSM, pCmd->cbRequest);
+ pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmRequestType);
+ const uint32_t cParms = pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.cParms : 0;
+ rc = pHlp->pfnSSMPutU32(pSSM, cParms);
+ AssertRCReturn(rc, rc);
+
+ if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pHlp->pfnSSMPutU32 (pSSM, pCmd->u.call.u32ClientID);
+ rc = pHlp->pfnSSMPutU32(pSSM, pCmd->u.call.u32Function);
+ AssertRCReturn(rc, rc);
+
+ /* Guest parameters. */
+ uint32_t i;
+ for (i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ rc = pHlp->pfnSSMPutU32(pSSM, (uint32_t)pGuestParm->enmType);
+ AssertRCReturn(rc, rc);
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
+ || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
+ {
+ const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ pHlp->pfnSSMPutU64 (pSSM, pVal->u64Value);
+ pHlp->pfnSSMPutU32 (pSSM, pVal->offValue);
+ rc = pHlp->pfnSSMPutU32(pSSM, pVal->cbValue);
+ }
+ else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ {
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ pHlp->pfnSSMPutU32 (pSSM, pPtr->cbData);
+ pHlp->pfnSSMPutU32 (pSSM, pPtr->offFirstPage);
+ pHlp->pfnSSMPutU32 (pSSM, pPtr->cPages);
+ rc = pHlp->pfnSSMPutU32(pSSM, pPtr->fu32Direction);
+
+ uint32_t iPage;
+ for (iPage = 0; RT_SUCCESS(rc) && iPage < pPtr->cPages; ++iPage)
+ rc = pHlp->pfnSSMPutGCPhys(pSSM, pPtr->paPages[iPage]);
+ }
+ else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
+ {
+                        /* We don't have the page addresses here, so they will need to be
+                           restored from guest memory. This isn't an issue as it is only
+                           used with services which won't survive a save/restore anyway. */
+ }
+ else
+ {
+ AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
+ }
+ AssertRCReturn(rc, rc);
+ }
+ }
+ else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ {
+ pHlp->pfnSSMPutU32(pSSM, pCmd->u.connect.u32ClientID);
+ pHlp->pfnSSMPutMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
+ }
+ else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
+ {
+ pHlp->pfnSSMPutU32(pSSM, pCmd->u.disconnect.u32ClientID);
+ }
+ else
+ {
+ AssertFailedReturn(VERR_INTERNAL_ERROR);
+ }
+
+            /* A reserved field which allows extending the saved data for a command. */
+ rc = pHlp->pfnSSMPutU32(pSSM, 0);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+    /* A reserved field which allows extending the saved data for VMMDevHGCM. */
+ rc = pHlp->pfnSSMPutU32(pSSM, 0);
+ AssertRCReturn(rc, rc);
+
+ return rc;
+}
+
+/** Load information about pending HGCM requests.
+ *
+ * Allocate VBOXHGCMCMD commands and add them to pThisCC->listHGCMCmd
+ * temporarily. vmmdevR3HgcmLoadStateDone will process the temporary list. This
+ * includes loading the correct fRequestor fields.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param pSSM SSM handle for SSM functions.
+ * @param uVersion Saved state version.
+ *
+ * @thread EMT
+ */
+int vmmdevR3HgcmLoadState(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+
+ LogFlowFunc(("\n"));
+
+ pThisCC->uSavedStateVersion = uVersion; /* For vmmdevR3HgcmLoadStateDone */
+
+ /* Read how many commands were pending. */
+ uint32_t cCmds = 0;
+ int rc = pHlp->pfnSSMGetU32(pSSM, &cCmds);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("cCmds = %d\n", cCmds));
+
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
+ {
+ /* Saved information about all HGCM parameters. */
+ uint32_t u32;
+
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ /* Command fields. */
+ VBOXHGCMCMDTYPE enmCmdType;
+ bool fCancelled;
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+ VMMDevRequestType enmRequestType;
+ uint32_t cParms;
+
+ pHlp->pfnSSMGetU32 (pSSM, &u32);
+ enmCmdType = (VBOXHGCMCMDTYPE)u32;
+ pHlp->pfnSSMGetBool (pSSM, &fCancelled);
+ pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
+ pHlp->pfnSSMGetU32 (pSSM, &cbRequest);
+ pHlp->pfnSSMGetU32 (pSSM, &u32);
+ enmRequestType = (VMMDevRequestType)u32;
+ rc = pHlp->pfnSSMGetU32(pSSM, &cParms);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cParms, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ pCmd->fCancelled = fCancelled;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+ pCmd->enmRequestType = enmRequestType;
+
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pHlp->pfnSSMGetU32 (pSSM, &pCmd->u.call.u32ClientID);
+ rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.call.u32Function);
+ AssertRCReturn(rc, rc);
+
+ /* Guest parameters. */
+ uint32_t i;
+ for (i = 0; i < cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ rc = pHlp->pfnSSMGetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ pGuestParm->enmType = (HGCMFunctionParameterType)u32;
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
+ || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
+ {
+ VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ pHlp->pfnSSMGetU64 (pSSM, &pVal->u64Value);
+ pHlp->pfnSSMGetU32 (pSSM, &pVal->offValue);
+ rc = pHlp->pfnSSMGetU32(pSSM, &pVal->cbValue);
+ }
+ else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ {
+ VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
+ pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
+ pHlp->pfnSSMGetU32 (pSSM, &pPtr->cPages);
+ rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->fu32Direction);
+ if (RT_SUCCESS(rc))
+ {
+ if (pPtr->cPages == 1)
+ pPtr->paPages = &pPtr->GCPhysSinglePage;
+ else
+ {
+ AssertReturn( pGuestParm->enmType != VMMDevHGCMParmType_Embedded
+ && pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList, VERR_INTERNAL_ERROR_3);
+ pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
+ pPtr->cPages * sizeof(RTGCPHYS));
+ AssertStmt(pPtr->paPages, rc = VERR_NO_MEMORY);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages; ++iPage)
+ rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
+ }
+ }
+ }
+ else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
+ {
+                            /* This request type can only be restored from guest memory for now. */
+ pCmd->fRestoreFromGuestMem = true;
+ }
+ else
+ {
+ AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
+ }
+ AssertRCReturn(rc, rc);
+ }
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ {
+ pHlp->pfnSSMGetU32(pSSM, &pCmd->u.connect.u32ClientID);
+ rc = pHlp->pfnSSMGetMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
+ AssertRCReturn(rc, rc);
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
+ {
+ rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.disconnect.u32ClientID);
+ AssertRCReturn(rc, rc);
+ }
+ else
+ {
+ AssertFailedReturn(VERR_INTERNAL_ERROR);
+ }
+
+            /* A reserved field which allows extending the saved data for a command. */
+ rc = pHlp->pfnSSMGetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Do not restore cancelled calls. Why do we save them to start with?
+ *
+ * The guest memory no longer contains a valid request! So, it is not
+ * possible to restore it. The memory is often reused for a new request
+ * by now and we will end up trying to complete that more than once if
+ * we restore a cancelled call. In some cases VERR_HGCM_INVALID_CLIENT_ID
+ * is returned, though it might just be silent memory corruption.
+ */
+ /* See current version above. */
+ if (!fCancelled)
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ else
+ {
+ Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
+ enmCmdType, GCPhys, cbRequest));
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+ }
+
+        /* A reserved field which allows extending the saved data for VMMDevHGCM. */
+ rc = pHlp->pfnSSMGetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ }
+ else if (uVersion >= 9)
+ {
+ /* Version 9+: Load information about commands. Pre-rewrite. */
+ uint32_t u32;
+
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ VBOXHGCMCMDTYPE enmCmdType;
+ bool fCancelled;
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+ uint32_t cLinAddrs;
+
+ pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
+ rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
+
+            /* For uVersion <= 12, this was the size of the entire command.
+ * Now the command is reconstructed in vmmdevR3HgcmLoadStateDone.
+ */
+ if (uVersion <= 12)
+ pHlp->pfnSSMSkip(pSSM, sizeof (uint32_t));
+
+ pHlp->pfnSSMGetU32 (pSSM, &u32);
+ enmCmdType = (VBOXHGCMCMDTYPE)u32;
+ pHlp->pfnSSMGetBool (pSSM, &fCancelled);
+ /* How many linear pointers. Always 0 if not a call command. */
+ rc = pHlp->pfnSSMGetU32(pSSM, &cLinAddrs);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cLinAddrs, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ pCmd->fCancelled = fCancelled;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+
+ if (cLinAddrs > 0)
+ {
+ /* Skip number of pages for all LinAddrs in this command. */
+ pHlp->pfnSSMSkip(pSSM, sizeof(uint32_t));
+
+ uint32_t i;
+ for (i = 0; i < cLinAddrs; ++i)
+ {
+ VBOXHGCMPARMPTR * const pPtr = &pCmd->u.call.paGuestParms[i].u.ptr;
+
+ /* Index of the parameter. Use cbData field to store the index. */
+ pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
+ pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
+ rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->cPages);
+ AssertRCReturn(rc, rc);
+
+ pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd, pPtr->cPages * sizeof(RTGCPHYS));
+ AssertReturn(pPtr->paPages, VERR_NO_MEMORY);
+
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages; ++iPage)
+ rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
+ }
+ }
+
+            /* A reserved field which allows extending the saved data for a command. */
+ rc = pHlp->pfnSSMGetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+
+ /* See current version above. */
+ if (!fCancelled)
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ else
+ {
+ Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
+ enmCmdType, GCPhys, cbRequest));
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+ }
+
+        /* A reserved field which allows extending the saved data for VMMDevHGCM. */
+ rc = pHlp->pfnSSMGetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ }
+ else
+ {
+        /* Ancient. Only the guest physical address and request size are saved. */
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+
+ pHlp->pfnSSMGetGCPhys(pSSM, &GCPhys);
+ rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_LOADSTATE, GCPhys, cbRequest, 0, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ }
+ }
+
+ return rc;
+}
+
+/** Restore HGCM connect command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param uSavedStateVersion The saved state version the command has been loaded from.
+ * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevR3HgcmRestoreConnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
+ VMMDevHGCMConnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (uSavedStateVersion >= 9)
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT, VERR_MISMATCH);
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, pLoadedCmd->GCPhys, cbReq, 0,
+ pReq->header.header.fRequestor);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ Assert(pLoadedCmd->fCancelled == false);
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ vmmdevR3HgcmConnectFetch(pReq, pCmd);
+
+ *ppRestoredCmd = pCmd;
+ return VINF_SUCCESS;
+}
+
+/** Restore HGCM disconnect command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param uSavedStateVersion The saved state version the command has been loaded from.
+ * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevR3HgcmRestoreDisconnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
+ VMMDevHGCMDisconnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (uSavedStateVersion >= 9)
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT, VERR_MISMATCH);
+
+ PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, pLoadedCmd->GCPhys, cbReq, 0,
+ pReq->header.header.fRequestor);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ Assert(pLoadedCmd->fCancelled == false);
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ vmmdevR3HgcmDisconnectFetch(pReq, pCmd);
+
+ *ppRestoredCmd = pCmd;
+ return VINF_SUCCESS;
+}
+
+/** Restore HGCM call command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param uSavedStateVersion The saved state version the command has been loaded from.
+ * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevR3HgcmRestoreCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
+ const VBOXHGCMCMD *pLoadedCmd, VMMDevHGCMCall *pReq, uint32_t cbReq,
+ VMMDevRequestType enmRequestType, VBOXHGCMCMD **ppRestoredCmd)
+{
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (uSavedStateVersion >= 9)
+ {
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_MISMATCH);
+ Assert(pLoadedCmd->fCancelled == false);
+ }
+
+ PVBOXHGCMCMD pCmd;
+ uint32_t cbHGCMParmStruct;
+ int rc = vmmdevR3HgcmCallAlloc(pThisCC, pReq, cbReq, pLoadedCmd->GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /* pLoadedCmd is fake, it does not contain actual call parameters. Only pagelists for LinAddr. */
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pReq, cbReq, enmRequestType, cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ /* Update LinAddr parameters from pLoadedCmd.
+ * pLoadedCmd->u.call.cParms is actually the number of LinAddrs, see vmmdevR3HgcmLoadState.
+ */
+ uint32_t iLinAddr;
+ for (iLinAddr = 0; iLinAddr < pLoadedCmd->u.call.cParms; ++iLinAddr)
+ {
+ VBOXHGCMGUESTPARM * const pLoadedParm = &pLoadedCmd->u.call.paGuestParms[iLinAddr];
+ /* pLoadedParm->cbData is actually index of the LinAddr parameter, see vmmdevR3HgcmLoadState. */
+ const uint32_t iParm = pLoadedParm->u.ptr.cbData;
+ ASSERT_GUEST_STMT_BREAK(iParm < pCmd->u.call.cParms, rc = VERR_MISMATCH);
+
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[iParm];
+ ASSERT_GUEST_STMT_BREAK( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr,
+ rc = VERR_MISMATCH);
+ ASSERT_GUEST_STMT_BREAK( pLoadedParm->u.ptr.offFirstPage == pGuestParm->u.ptr.offFirstPage
+ && pLoadedParm->u.ptr.cPages == pGuestParm->u.ptr.cPages,
+ rc = VERR_MISMATCH);
+ memcpy(pGuestParm->u.ptr.paPages, pLoadedParm->u.ptr.paPages, pGuestParm->u.ptr.cPages * sizeof(RTGCPHYS));
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ *ppRestoredCmd = pCmd;
+ else
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+
+ return rc;
+}
+
+/** Allocate and initialize a HGCM command using the given request (pReqHdr)
+ * and the command loaded from saved state (pLoadedCmd).
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ * @param uSavedStateVersion Saved state version.
+ * @param pLoadedCmd HGCM command which needs restoration.
+ * @param pReqHdr The request (cached in host memory).
+ * @param cbReq Size of the entire request (including HGCM parameters).
+ * @param ppRestoredCmd Where to store pointer to restored command.
+ */
+static int vmmdevR3HgcmRestoreCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
+ const VBOXHGCMCMD *pLoadedCmd, const VMMDevHGCMRequestHeader *pReqHdr, uint32_t cbReq,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ int rc;
+
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(VMMDevHGCMRequestHeader), VERR_MISMATCH);
+ ASSERT_GUEST_RETURN(cbReq == pReqHdr->header.size, VERR_MISMATCH);
+
+ const VMMDevRequestType enmRequestType = pReqHdr->header.requestType;
+ switch (enmRequestType)
+ {
+ case VMMDevReq_HGCMConnect:
+ {
+ VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
+ rc = vmmdevR3HgcmRestoreConnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
+ break;
+ }
+
+ case VMMDevReq_HGCMDisconnect:
+ {
+ VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
+ rc = vmmdevR3HgcmRestoreDisconnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
+ break;
+ }
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+#endif
+ case VMMDevReq_HGCMCall32:
+ {
+ VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
+ rc = vmmdevR3HgcmRestoreCall(pDevIns, pThis, pThisCC, uSavedStateVersion, pLoadedCmd,
+ pReq, cbReq, enmRequestType, ppRestoredCmd);
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_MISMATCH);
+ }
+
+ return rc;
+}
+
+/** Resubmit pending HGCM commands which were loaded from saved state.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ *
+ * @thread EMT
+ */
+int vmmdevR3HgcmLoadStateDone(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
+{
+ /*
+ * Resubmit pending HGCM commands to services.
+ *
+     * pThisCC->listHGCMCmd contains the commands loaded by vmmdevR3HgcmLoadState.
+ *
+ * Legacy saved states (pre VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
+ * do not have enough information about the command parameters,
+ * therefore it is necessary to reload at least some data from the
+ * guest memory to construct commands.
+ *
+     * There are two types of legacy saved states, containing:
+     * 1) only the guest physical address and size of the request;
+     * 2) additionally, page lists for LinAddr parameters.
+ *
+ * Legacy commands have enmCmdType = VBOXHGCMCMDTYPE_LOADSTATE?
+ */
+
+    int rcFunc = VINF_SUCCESS; /* If set to a failure status, the whole function fails, i.e. the VM will not start. */
+
+ /* Get local copy of the list of loaded commands. */
+ RTLISTANCHOR listLoadedCommands;
+ RTListMove(&listLoadedCommands, &pThisCC->listHGCMCmd);
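+    /* Work on a private copy of the list: successfully resubmitted commands are re-added to listHGCMCmd by
+     * vmmdevR3HgcmAddCommand, while anything left over on failure is freed at the end of this function. */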
+
+ /* Resubmit commands. */
+ PVBOXHGCMCMD pCmd, pNext;
+ RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+        int rcCmd = VINF_SUCCESS; /* If set to a failure status, this HGCM command fails for the guest. */
+
+ RTListNodeRemove(&pCmd->node);
+
+ /*
+ * Re-read the request from the guest memory.
+ * It will be used to:
+         * * reconstruct the command if a legacy saved state has been restored;
+         * * report an error to the guest if the resubmit fails.
+ */
+ VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
+ AssertBreakStmt(pReqHdr, vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd); rcFunc = VERR_NO_MEMORY);
+
+ PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+
+ if (pThisCC->pHGCMDrv)
+ {
+ /*
+ * Reconstruct legacy commands.
+ */
+ if (RT_LIKELY( pThisCC->uSavedStateVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS
+ && !pCmd->fRestoreFromGuestMem))
+ { /* likely */ }
+ else
+ {
+ PVBOXHGCMCMD pRestoredCmd = NULL;
+ rcCmd = vmmdevR3HgcmRestoreCommand(pDevIns, pThis, pThisCC, pThisCC->uSavedStateVersion, pCmd,
+ pReqHdr, pCmd->cbRequest, &pRestoredCmd);
+ if (RT_SUCCESS(rcCmd))
+ {
+ Assert(pCmd != pRestoredCmd); /* vmmdevR3HgcmRestoreCommand must allocate restored command. */
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ pCmd = pRestoredCmd;
+ }
+ }
+
+ /* Resubmit commands. */
+ if (RT_SUCCESS(rcCmd))
+ {
+ switch (pCmd->enmCmdType)
+ {
+ case VBOXHGCMCMDTYPE_CONNECT:
+ {
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ rcCmd = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc,
+ &pCmd->u.connect.u32ClientID);
+ if (RT_FAILURE(rcCmd))
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ break;
+ }
+
+ case VBOXHGCMCMDTYPE_DISCONNECT:
+ {
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+ rcCmd = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
+ if (RT_FAILURE(rcCmd))
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ break;
+ }
+
+ case VBOXHGCMCMDTYPE_CALL:
+ {
+ rcCmd = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pReqHdr);
+ if (RT_SUCCESS(rcCmd))
+ {
+ vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
+
+ /* Pass the function call to HGCM connector for actual processing */
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ rcCmd = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
+ pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
+ pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsNow);
+ if (RT_FAILURE(rcCmd))
+ {
+ LogFunc(("pfnCall rc = %Rrc\n", rcCmd));
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ }
+ }
+ break;
+ }
+
+ default:
+ AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
+ }
+ }
+ }
+ else
+ AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
+
+ if (RT_SUCCESS(rcCmd))
+ { /* likely */ }
+ else
+ {
+ /* Return the error to the guest. Guest may try to repeat the call. */
+ pReqHdr->result = rcCmd;
+ pReqHdr->header.rc = rcCmd;
+ pReqHdr->fu32Flags |= VBOX_HGCM_REQ_DONE;
+
+ /* Write back only the header. */
+ PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
+
+ VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
+
+ /* Deallocate the command memory. */
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+
+ RTMemFree(pReqHdr);
+ }
+
+ if (RT_FAILURE(rcFunc))
+ {
+ RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+ RTListNodeRemove(&pCmd->node);
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+ }
+
+ return rcFunc;
+}
+
+
+/**
+ * Counterpart to vmmdevR3HgcmInit().
+ *
+ * @param pDevIns The device instance.
+ * @param pThis The VMMDev shared instance data.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ */
+void vmmdevR3HgcmDestroy(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
+{
+ LogFlowFunc(("\n"));
+
+ if (RTCritSectIsInitialized(&pThisCC->critsectHGCMCmdList))
+ {
+ PVBOXHGCMCMD pCmd, pNext;
+ RTListForEachSafe(&pThisCC->listHGCMCmd, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+ vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
+ vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
+ }
+
+ RTCritSectDelete(&pThisCC->critsectHGCMCmdList);
+ }
+
+ AssertCompile(NIL_RTMEMCACHE == (RTMEMCACHE)0);
+ if (pThisCC->hHgcmCmdCache != NIL_RTMEMCACHE)
+ {
+ RTMemCacheDestroy(pThisCC->hHgcmCmdCache);
+ pThisCC->hHgcmCmdCache = NIL_RTMEMCACHE;
+ }
+}
+
+
+/**
+ * Initializes the HGCM specific state.
+ *
+ * Keeps VBOXHGCMCMDCACHED and friends local.
+ *
+ * @returns VBox status code.
+ * @param pThisCC The VMMDev ring-3 instance data.
+ */
+int vmmdevR3HgcmInit(PVMMDEVCC pThisCC)
+{
+ LogFlowFunc(("\n"));
+
+ RTListInit(&pThisCC->listHGCMCmd);
+
+ int rc = RTCritSectInit(&pThisCC->critsectHGCMCmdList);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = RTMemCacheCreate(&pThisCC->hHgcmCmdCache, sizeof(VBOXHGCMCMDCACHED), 64, _1M, NULL, NULL, NULL, 0);
+ AssertLogRelRCReturn(rc, rc);
+
+ pThisCC->u32HGCMEnabled = 0;
+
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevHGCM.h b/src/VBox/Devices/VMMDev/VMMDevHGCM.h
new file mode 100644
index 00000000..4547fcbf
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevHGCM.h
@@ -0,0 +1,62 @@
+/* $Id: VMMDevHGCM.h $ */
+/** @file
+ * VBoxDev - HGCM - Host-Guest Communication Manager, internal header.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "VMMDevState.h"
+
+RT_C_DECLS_BEGIN
+int vmmdevR3HgcmConnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys);
+int vmmdevR3HgcmDisconnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
+ const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys);
+int vmmdevR3HgcmCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
+ RTGCPHYS GCPhys, VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock);
+
+int vmmdevR3HgcmCancel(PVMMDEVCC pThisCC, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys);
+int vmmdevR3HgcmCancel2(PVMMDEVCC pThisCC, RTGCPHYS GCPhys);
+
+DECLCALLBACK(int) hgcmR3Completed(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmdPtr);
+DECLCALLBACK(bool) hgcmR3IsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(bool) hgcmR3IsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(uint32_t) hgcmR3GetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(uint64_t) hgcmR3GetVMMDevSessionId(PPDMIHGCMPORT pInterface);
+
+int vmmdevR3HgcmSaveState(PVMMDEVCC pThisCC, PSSMHANDLE pSSM);
+int vmmdevR3HgcmLoadState(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PSSMHANDLE pSSM, uint32_t uVersion);
+int vmmdevR3HgcmLoadStateDone(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC);
+
+void vmmdevR3HgcmDestroy(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC);
+int vmmdevR3HgcmInit(PVMMDEVCC pThisCC);
+RT_C_DECLS_END
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevState.h b/src/VBox/Devices/VMMDev/VMMDevState.h
new file mode 100644
index 00000000..fe429ac5
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevState.h
@@ -0,0 +1,589 @@
+/* $Id: VMMDevState.h $ */
+/** @file
+ * VMMDev - Guest <-> VMM/Host communication device, internal header.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBoxVideo.h> /* For VBVA definitions. */
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/pdmdev.h>
+#include <VBox/vmm/pdmifs.h>
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+# include <VBox/vmm/pdmthread.h>
+# include <iprt/test.h>
+# include <VBox/VMMDevTesting.h>
+#endif
+
+#include <iprt/list.h>
+#include <iprt/memcache.h>
+
+
+#define VMMDEV_WITH_ALT_TIMESYNC
+
+/** Request locking structure (HGCM optimization). */
+typedef struct VMMDEVREQLOCK
+{
+ void *pvReq;
+ PGMPAGEMAPLOCK Lock;
+} VMMDEVREQLOCK;
+/** Pointer to a request lock structure. */
+typedef VMMDEVREQLOCK *PVMMDEVREQLOCK;
+
+typedef struct DISPLAYCHANGEREQUEST
+{
+ bool fPending;
+ bool afAlignment[3];
+ VMMDevDisplayDef displayChangeRequest;
+ VMMDevDisplayDef lastReadDisplayChangeRequest;
+} DISPLAYCHANGEREQUEST;
+
+typedef struct DISPLAYCHANGEDATA
+{
+ /* Which monitor is being reported to the guest. */
+ int32_t iCurrentMonitor;
+
+ /** true if the guest responded to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST at least once */
+ bool fGuestSentChangeEventAck;
+ bool afAlignment[3];
+
+ DISPLAYCHANGEREQUEST aRequests[VBOX_VIDEO_MAX_SCREENS];
+} DISPLAYCHANGEDATA;
+
+
+/**
+ * Credentials for automatic guest logon and host configured logon (?).
+ *
+ * This is not stored in the same block as the instance data in order to make it
+ * harder to access.
+ */
+typedef struct VMMDEVCREDS
+{
+ /** credentials for guest logon purposes */
+ struct
+ {
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+ bool fAllowInteractiveLogon;
+ } Logon;
+
+ /** credentials for verification by guest */
+ struct
+ {
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+ } Judge;
+} VMMDEVCREDS;
+
+
+/**
+ * Facility status entry.
+ */
+typedef struct VMMDEVFACILITYSTATUSENTRY
+{
+ /** The facility (may contain values other than the defined ones). */
+ VBoxGuestFacilityType enmFacility;
+ /** The status (may contain values other than the defined ones). */
+ VBoxGuestFacilityStatus enmStatus;
+ /** Whether this entry is fixed and cannot be reused when inactive. */
+ bool fFixed;
+ /** Explicit alignment padding / reserved for future use. MBZ. */
+ bool afPadding[3];
+ /** The facility flags (yet to be defined). */
+ uint32_t fFlags;
+ /** Last update timestamp. */
+ RTTIMESPEC TimeSpecTS;
+} VMMDEVFACILITYSTATUSENTRY;
+/** Pointer to a facility status entry. */
+typedef VMMDEVFACILITYSTATUSENTRY *PVMMDEVFACILITYSTATUSENTRY;
+
+
+/**
+ * State structure for the VMM device.
+ */
+typedef struct VMMDEV
+{
+ /** The critical section for this device.
+ * @remarks We use this rather than the default one because it's simpler with
+ * all the driver interfaces, where we would otherwise have to waste time
+ * digging out the PDMDEVINS structure. */
+ PDMCRITSECT CritSect;
+#if !defined(VBOX_WITHOUT_TESTING_FEATURES) || defined(DOXYGEN_RUNNING)
+ /** Read/write critical section for lock testing.
+ * @remarks Placed at the beginning to satisfy the 64 byte alignment requirement. */
+ PDMCRITSECTRW CritSectRw;
+#endif
+
+ /** mouse capabilities of host and guest */
+ uint32_t fMouseCapabilities;
+ /** @name Absolute mouse position in pixels, relative wheel movement and buttons state.
+ * @{ */
+ int32_t xMouseAbs;
+ int32_t yMouseAbs;
+ int32_t dzMouse;
+ int32_t dwMouse;
+ uint32_t fMouseButtons;
+ /** @} */
+ /** Does the guest currently want the host pointer to be shown? */
+ uint32_t fHostCursorRequested;
+
+ /** message buffer for backdoor logging. */
+ char szMsg[512];
+ /** message buffer index. */
+ uint32_t offMsg;
+ /** Alignment padding. */
+ uint32_t u32Alignment2;
+
+ /** Statistics counter for slow IRQ ACK. */
+ STAMCOUNTER StatSlowIrqAck;
+ /** Statistics counter for fast IRQ ACK - R3. */
+ STAMCOUNTER StatFastIrqAckR3;
+ /** Statistics counter for fast IRQ ACK - R0 / RC. */
+ STAMCOUNTER StatFastIrqAckRZ;
+ /** Current host side event flags - VMMDEV_EVENT_XXX. */
+ uint32_t fHostEventFlags;
+ /** Mask of events the guest is interested in - VMMDEV_EVENT_XXX.
+ * @note The HGCM events are enabled automatically by the VMMDev device when
+ * the guest issues HGCM commands. */
+ uint32_t fGuestFilterMask;
+ /** Delayed mask of guest events - VMMDEV_EVENT_XXX. */
+ uint32_t fNewGuestFilterMask;
+ /** Flag whether fNewGuestFilterMask is valid */
+ bool fNewGuestFilterMaskValid;
+ /** Alignment padding. */
+ bool afAlignment3[3];
+
+ /** Information reported by the guest via the VMMDevReportGuestInfo generic request.
+ * Until this information is reported, the VMMDev refuses any other requests.
+ */
+ VBoxGuestInfo guestInfo;
+ /** Information report \#2, chewed a little. */
+ struct
+ {
+ uint32_t uFullVersion; /**< non-zero if info is present. */
+ uint32_t uRevision;
+ uint32_t fFeatures;
+ char szName[128];
+ } guestInfo2;
+
+ /** Array of guest facility statuses. */
+ VMMDEVFACILITYSTATUSENTRY aFacilityStatuses[32];
+ /** The number of valid entries in the facility status array. */
+ uint32_t cFacilityStatuses;
+
+ /** Information reported by guest via VMMDevReportGuestCapabilities - VMMDEV_GUEST_SUPPORTS_XXX. */
+ uint32_t fGuestCaps;
+
+ /** "Additions are Ok" indicator, set to true after processing VMMDevReportGuestInfo,
+ * if additions version is compatible. This flag is here to avoid repeated comparing
+ * of the version in guestInfo.
+ */
+ uint32_t fu32AdditionsOk;
+
+ /** Video acceleration status set by guest. */
+ uint32_t u32VideoAccelEnabled;
+
+ DISPLAYCHANGEDATA displayChangeData;
+
+ /** memory balloon change request */
+ uint32_t cMbMemoryBalloon;
+ /** The last balloon size queried by the guest additions. */
+ uint32_t cMbMemoryBalloonLast;
+
+ /** guest ram size */
+ uint64_t cbGuestRAM;
+
+ /** unique session id; the id will be different after each start, reset or restore of the VM. */
+ uint64_t idSession;
+
+ /** Statistics interval in seconds. */
+ uint32_t cSecsStatInterval;
+ /** The statistics interval last returned to the guest. */
+ uint32_t cSecsLastStatInterval;
+
+ /** Whether seamless is enabled or not. */
+ bool fSeamlessEnabled;
+ /** The last fSeamlessEnabled state returned to the guest. */
+ bool fLastSeamlessEnabled;
+ bool afAlignment5[1];
+
+ bool fVRDPEnabled;
+ uint32_t uVRDPExperienceLevel;
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+ uint64_t msLatchedHostTime;
+ bool fTimesyncBackdoorLo;
+ bool afAlignment6[1];
+#else
+ bool afAlignment6[2];
+#endif
+
+ /** Set if guest should be allowed to trigger state save and power off. */
+ bool fAllowGuestToSaveState;
+ /** Set if GetHostTime should fail.
+ * Loaded from the GetHostTimeDisabled configuration value. */
+ bool fGetHostTimeDisabled;
+ /** Set if backdoor logging should be disabled (output will be ignored then) */
+ bool fBackdoorLogDisabled;
+ /** Don't clear credentials */
+ bool fKeepCredentials;
+ /** Heap enabled. */
+ bool fHeapEnabled;
+
+ /** Guest Core Dumping enabled. */
+ bool fGuestCoreDumpEnabled;
+ /** Guest Core Dump location. */
+ char szGuestCoreDumpDir[RTPATH_MAX];
+ /** Number of additional cores to keep around. */
+ uint32_t cGuestCoreDumps;
+
+ /** Flag whether CPU hotplug events are monitored */
+ bool fCpuHotPlugEventsEnabled;
+ /** Alignment padding. */
+ bool afPadding8[3];
+ /** CPU hotplug event */
+ VMMDevCpuEventType enmCpuHotPlugEvent;
+ /** Core id of the CPU to change */
+ uint32_t idCpuCore;
+ /** Package id of the CPU to change */
+ uint32_t idCpuPackage;
+
+ uint32_t StatMemBalloonChunks;
+
+ /** @name Heartbeat
+ * @{ */
+ /** Timestamp of the last heartbeat from guest in nanosec. */
+ uint64_t volatile nsLastHeartbeatTS;
+ /** Indicates whether we missed HB from guest on last check. */
+ bool volatile fFlatlined;
+ /** Indicates whether heartbeat check is active. */
+ bool volatile fHeartbeatActive;
+ /** Alignment padding. */
+ bool afAlignment8[6];
+ /** Guest heartbeat interval in nanoseconds.
+ * This is the interval the guest is told to produce heartbeats at. */
+ uint64_t cNsHeartbeatInterval;
+ /** The amount of time without a heartbeat (nanoseconds) before we
+ * conclude the guest is doing a Dixie Flatline (Neuromancer) impression. */
+ uint64_t cNsHeartbeatTimeout;
+ /** Timer for signalling a flatlined guest. */
+ TMTIMERHANDLE hFlatlinedTimer;
+ /** @} */
+
+ /** @name Testing
+ * @{ */
+ /** Set if testing is enabled. */
+ bool fTestingEnabled;
+ /** Set if testing the MMIO testing range is enabled. */
+ bool fTestingMMIO;
+#if defined(VBOX_WITHOUT_TESTING_FEATURES) && !defined(DOXYGEN_RUNNING)
+ /** Alignment padding. */
+ bool afPadding9[2];
+#else
+ /** The amount of readable testing data (for query response). */
+ uint16_t cbReadableTestingData;
+ /** The high timestamp value. */
+ uint32_t u32TestingHighTimestamp;
+ /** The current testing command (VMMDEV_TESTING_CMD_XXX). */
+ uint32_t u32TestingCmd;
+ /** The testing data offset (command specific). */
+ uint32_t offTestingData;
+ /** For buffering what comes in over the testing data port. */
+ union
+ {
+ /** Plain byte view. */
+ uint8_t ab[1024];
+
+ /** VMMDEV_TESTING_CMD_INIT, VMMDEV_TESTING_CMD_SUB_NEW,
+ * VMMDEV_TESTING_CMD_FAILED. */
+ struct
+ {
+ char sz[1024];
+ } String, Init, SubNew, Failed;
+
+ /** VMMDEV_TESTING_CMD_TERM, VMMDEV_TESTING_CMD_SUB_DONE. */
+ struct
+ {
+ uint32_t c;
+ } Error, Term, SubDone;
+
+ /** VMMDEV_TESTING_CMD_VALUE. */
+ struct
+ {
+ RTUINT64U u64Value;
+ uint32_t u32Unit;
+ char szName[1024 - 8 - 4];
+ } Value;
+
+ /** An 8-bit VMMDEV_TESTING_QUERY_CFG response. */
+ uint8_t b;
+ /** A 32-bit VMMDEV_TESTING_QUERY_CFG response. */
+ uint32_t u32;
+
+ /** The read back register (VMMDEV_TESTING_MMIO_OFF_READBACK,
+ * VMMDEV_TESTING_MMIO_OFF_READBACK_R3). */
+ uint8_t abReadBack[VMMDEV_TESTING_READBACK_SIZE];
+ } TestingData;
+ /** The locking testing control dword. */
+ union
+ {
+ /** Plain view. */
+ uint64_t u64;
+ /** Plain 32-bit view. */
+ uint32_t au32[2];
+ struct
+ {
+ /** bits 15:0: Number of microseconds to hold the lock. */
+ uint32_t cUsHold : 16;
+ /** bits 31:16: Number of microseconds to wait before retaking the lock again. */
+ uint32_t cUsBetween : 16;
+ /** bits 51:32: Kilo (1024) ticks the EMT should hold the lock for. */
+ uint32_t cKiloTicksEmtHold : 20;
+ /** bits 57:52: Reserved MBZ. */
+ uint32_t uReserved : 6;
+ /** bit 58: Thread takes lock in shared mode when set, exclusive when clear. */
+ uint32_t fThreadShared : 1;
+ /** bit 59: EMT takes lock in shared mode when set, exclusive when clear. */
+ uint32_t fEmtShared : 1;
+ /** bit 60: Use read/write critical section instead of regular. */
+ uint32_t fReadWriteSection : 1;
+ /** bit 61: EMT passes VINF_SUCCESS as rcBusy if set. */
+ uint32_t fMustSucceed : 1;
+ /** bit 62: Thread pokes EMTs before releasing it when set. */
+ uint32_t fPokeBeforeRelease : 1;
+ /** bit 63: Enabled/disabled. */
+ uint32_t fEnabled : 1;
+ } s;
+ } TestingLockControl;
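+ /* Illustrative only (not from the Guest Additions sources): the guest
+ * programs this pair of dwords via the VMMDEV_TESTING_IOPORT_LOCKED_LO and
+ * _HI ports.  E.g. a low dword of 0x00320064 asks the locking thread to hold
+ * the lock for 0x64 (100) microseconds and to pause 0x32 (50) microseconds
+ * between acquisitions, while setting VMMDEV_TESTING_LOCKED_HI_ENABLED in the
+ * high dword starts the contention test (see vmmdevTestingIoWrite and
+ * vmmdevR3TestingLockingThread in VMMDevTesting.cpp). */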
+ /** Event semaphore that the locking thread blocks on. */
+ SUPSEMEVENT hTestingLockEvt;
+# if HC_ARCH_BITS == 32
+ uint32_t uPadding10;
+# endif
+ /** Handle for the I/O ports used by the testing component. */
+ IOMIOPORTHANDLE hIoPortTesting;
+ /** Handle for the MMIO region used by the testing component. */
+ IOMMMIOHANDLE hMmioTesting;
+ /** User defined configuration dwords. */
+ uint32_t au32TestingCfgDwords[10];
+#endif /* !VBOX_WITHOUT_TESTING_FEATURES || DOXYGEN_RUNNING */
+ /** @} */
+
+ /** Handle for the backdoor logging I/O port. */
+ IOMIOPORTHANDLE hIoPortBackdoorLog;
+ /** Handle for the alternative timesync I/O port. */
+ IOMIOPORTHANDLE hIoPortAltTimesync;
+ /** Handle for the VMM request I/O port (PCI region \#0). */
+ IOMIOPORTHANDLE hIoPortReq;
+ /** Handle for the fast VMM request I/O port (PCI region \#0). */
+ IOMIOPORTHANDLE hIoPortFast;
+ /** Handle for the VMMDev RAM (PCI region \#1). */
+ PGMMMIO2HANDLE hMmio2VMMDevRAM;
+ /** Handle for the VMMDev Heap (PCI region \#2). */
+ PGMMMIO2HANDLE hMmio2Heap;
+} VMMDEV;
+/** Pointer to the shared VMM device state. */
+typedef VMMDEV *PVMMDEV;
+AssertCompileMemberAlignment(VMMDEV, CritSect, 8);
+AssertCompileMemberAlignment(VMMDEV, StatSlowIrqAck, 8);
+AssertCompileMemberAlignment(VMMDEV, cbGuestRAM, 8);
+AssertCompileMemberAlignment(VMMDEV, enmCpuHotPlugEvent, 4);
+AssertCompileMemberAlignment(VMMDEV, aFacilityStatuses, 8);
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+AssertCompileMemberAlignment(VMMDEV, TestingData.Value.u64Value, 8);
+AssertCompileMemberAlignment(VMMDEV, CritSectRw, 64);
+#endif
+
+
+/** @name VMMDev/HGCM accounting categories (indexes into VMMDEVR3::aHgcmAcc)
+ * @{ */
+/** Legacy, VMMDEV_REQUESTOR_USR_NOT_GIVEN, VMMDEV_REQUESTOR_USR_DRV,
+ * VMMDEV_REQUESTOR_USR_DRV_OTHER. */
+#define VMMDEV_HGCM_CATEGORY_KERNEL 0
+/** VMMDEV_REQUESTOR_USR_ROOT, VMMDEV_REQUESTOR_USR_SYSTEM */
+#define VMMDEV_HGCM_CATEGORY_ROOT 1
+/** VMMDEV_REQUESTOR_USR_RESERVED1, VMMDEV_REQUESTOR_USR_USER,
+ * VMMDEV_REQUESTOR_USR_GUEST */
+#define VMMDEV_HGCM_CATEGORY_USER 2
+/** Array size. */
+#define VMMDEV_HGCM_CATEGORY_MAX 3
+/** @} */
+
+/**
+ * State structure for the VMM device, ring-3 edition.
+ */
+typedef struct VMMDEVR3
+{
+ /** LUN\#0 + Status: VMMDev port base interface. */
+ PDMIBASE IBase;
+ /** LUN\#0: VMMDev port interface. */
+ PDMIVMMDEVPORT IPort;
+#ifdef VBOX_WITH_HGCM
+ /** LUN\#0: HGCM port interface. */
+ PDMIHGCMPORT IHGCMPort;
+ /** HGCM connector interface */
+ R3PTRTYPE(PPDMIHGCMCONNECTOR) pHGCMDrv;
+#endif
+ /** Pointer to base interface of the driver. */
+ R3PTRTYPE(PPDMIBASE) pDrvBase;
+ /** VMMDev connector interface */
+ R3PTRTYPE(PPDMIVMMDEVCONNECTOR) pDrv;
+ /** Pointer to the device instance.
+ * @note Only for interface methods to get their bearings. */
+ PPDMDEVINSR3 pDevIns;
+
+ /** R3 pointer to VMMDev RAM area */
+ R3PTRTYPE(VMMDevMemory *) pVMMDevRAMR3;
+
+ /** R3 pointer to VMMDev Heap RAM area. */
+ R3PTRTYPE(VMMDevMemory *) pVMMDevHeapR3;
+
+ /** Pointer to the credentials. */
+ R3PTRTYPE(VMMDEVCREDS *) pCredentials;
+ /** Set if pCredentials is using the RTMemSafer allocator, clear if heap. */
+ bool fSaferCredentials;
+ bool afAlignment[7];
+
+#ifdef VBOX_WITH_HGCM
+ /** Critical section to protect the list. */
+ RTCRITSECT critsectHGCMCmdList;
+ /** List of pending HGCM requests (VBOXHGCMCMD). */
+ RTLISTANCHORR3 listHGCMCmd;
+ /** Whether the HGCM events are already automatically enabled. */
+ uint32_t u32HGCMEnabled;
+ /** Saved state version of restored commands. */
+ uint32_t uSavedStateVersion;
+ RTMEMCACHE hHgcmCmdCache;
+ /** Accounting for each requestor VMMDEV_REQUESTOR_USR_XXX group.
+ * Legacy requests end up with VMMDEV_REQUESTOR_USR_NOT_GIVEN */
+ struct
+ {
+ /** The configured heap budget. */
+ uint64_t cbHeapBudgetConfig;
+ /** The currently available heap budget. */
+ uint64_t cbHeapBudget;
+ /** Message stats. */
+ STAMPROFILE StateMsgHeapUsage;
+ /** Budget overruns. */
+ STAMCOUNTER StatBudgetOverruns;
+ } aHgcmAcc[VMMDEV_HGCM_CATEGORY_MAX];
+ STAMPROFILE StatHgcmCmdArrival;
+ STAMPROFILE StatHgcmCmdCompletion;
+ STAMPROFILE StatHgcmCmdTotal;
+ STAMCOUNTER StatHgcmLargeCmdAllocs;
+ STAMCOUNTER StatHgcmFailedPageListLocking;
+#endif /* VBOX_WITH_HGCM */
+ STAMCOUNTER StatReqBufAllocs;
+ /** Per CPU request 4K sized buffers, allocated as needed. */
+ R3PTRTYPE(VMMDevRequestHeader *) apReqBufs[VMM_MAX_CPU_COUNT];
+
+ /** Status LUN: Shared folders LED */
+ struct
+ {
+ /** The LED. */
+ PDMLED Led;
+ /** The LED ports. */
+ PDMILEDPORTS ILeds;
+ /** Partner of ILeds. */
+ R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
+ } SharedFolders;
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /** The XML output file name (can be a named pipe, doesn't matter to us). */
+ R3PTRTYPE(char *) pszTestingXmlOutput;
+ /** Testing instance for dealing with the output. */
+ RTTEST hTestingTest;
+ /** The locking test thread. */
+ PPDMTHREAD pTestingLockThread;
+#endif
+} VMMDEVR3;
+/** Pointer to the ring-3 VMM device state. */
+typedef VMMDEVR3 *PVMMDEVR3;
+
+
+/**
+ * State structure for the VMM device, ring-0 edition.
+ */
+typedef struct VMMDEVR0
+{
+ /** R0 pointer to VMMDev RAM area - first page only, could be NULL! */
+ R0PTRTYPE(VMMDevMemory *) pVMMDevRAMR0;
+} VMMDEVR0;
+/** Pointer to the ring-0 VMM device state. */
+typedef VMMDEVR0 *PVMMDEVR0;
+
+
+/**
+ * State structure for the VMM device, raw-mode edition.
+ */
+typedef struct VMMDEVRC
+{
+ /** RC pointer to VMMDev RAM area - first page only, could be NULL! */
+ RCPTRTYPE(VMMDevMemory *) pVMMDevRAMRC;
+} VMMDEVRC;
+/** Pointer to the raw-mode VMM device state. */
+typedef VMMDEVRC *PVMMDEVRC;
+
+
+/** @typedef VMMDEVCC
+ * The VMMDEV device data for the current context. */
+typedef CTX_SUFF(VMMDEV) VMMDEVCC;
+/** @typedef PVMMDEVCC
+ * Pointer to the VMMDEV device for the current context. */
+typedef CTX_SUFF(PVMMDEV) PVMMDEVCC;
+
+
+void VMMDevNotifyGuest(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t fAddEvents);
+void VMMDevCtlSetGuestFilterMask(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t fOrMask, uint32_t fNotMask);
+
+
+/** The saved state version. */
+#define VMMDEV_SAVED_STATE_VERSION VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA
+/** The saved state version with VMMDev mouse buttons state and wheel movement data. */
+#define VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA 19
+/** The saved state version with display change data state. */
+#define VMMDEV_SAVED_STATE_VERSION_DISPLAY_CHANGE_DATA 18
+/** Updated HGCM commands. */
+#define VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS 17
+/** The saved state version with heartbeat state. */
+#define VMMDEV_SAVED_STATE_VERSION_HEARTBEAT 16
+/** The saved state version without heartbeat state. */
+#define VMMDEV_SAVED_STATE_VERSION_NO_HEARTBEAT 15
+/** The saved state version which is missing the guest facility statuses. */
+#define VMMDEV_SAVED_STATE_VERSION_MISSING_FACILITY_STATUSES 14
+/** The saved state version which is missing the guestInfo2 bits. */
+#define VMMDEV_SAVED_STATE_VERSION_MISSING_GUEST_INFO_2 13
+/** The saved state version used by VirtualBox 3.0.
+ * This doesn't have the config part. */
+#define VMMDEV_SAVED_STATE_VERSION_VBOX_30 11
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevTesting.cpp b/src/VBox/Devices/VMMDev/VMMDevTesting.cpp
new file mode 100644
index 00000000..e7ddc94e
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevTesting.cpp
@@ -0,0 +1,1111 @@
+/* $Id: VMMDevTesting.cpp $ */
+/** @file
+ * VMMDev - Testing Extensions.
+ *
+ * To enable: VBoxManage setextradata vmname VBoxInternal/Devices/VMMDev/0/Config/TestingEnabled 1
+ */
+
+/*
+ * Copyright (C) 2010-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#include <iprt/test.h>
+
+#ifdef IN_RING3
+# define USING_VMM_COMMON_DEFS /* HACK ALERT! We ONLY want the EMT thread handles, so the common defs don't matter. */
+# include <VBox/vmm/vmcc.h>
+#endif
+#include <VBox/AssertGuest.h>
+
+#include "VMMDevState.h"
+#include "VMMDevTesting.h"
+
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+
+#define VMMDEV_TESTING_OUTPUT(a) \
+ do \
+ { \
+ LogAlways(a);\
+ LogRel(a);\
+ } while (0)
+
+/**
+ * @callback_method_impl{FNIOMMMIONEWWRITE}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmmdevTestingMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
+{
+ RT_NOREF_PV(pvUser);
+
+ switch (off)
+ {
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+ case VMMDEV_TESTING_MMIO_OFF_NOP:
+ return VINF_SUCCESS;
+
+ default:
+ {
+ /*
+ * Readback register (64 bytes wide).
+ */
+ if ( ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK + VMMDEV_TESTING_READBACK_SIZE)
+#ifndef IN_RING3
+ || ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + VMMDEV_TESTING_READBACK_SIZE)
+#endif
+ )
+ {
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ off &= VMMDEV_TESTING_READBACK_SIZE - 1;
+ switch (cb)
+ {
+ case 8: *(uint64_t *)&pThis->TestingData.abReadBack[off] = *(uint64_t const *)pv; break;
+ case 4: *(uint32_t *)&pThis->TestingData.abReadBack[off] = *(uint32_t const *)pv; break;
+ case 2: *(uint16_t *)&pThis->TestingData.abReadBack[off] = *(uint16_t const *)pv; break;
+ case 1: *(uint8_t *)&pThis->TestingData.abReadBack[off] = *(uint8_t const *)pv; break;
+ default: memcpy(&pThis->TestingData.abReadBack[off], pv, cb); break;
+ }
+ return VINF_SUCCESS;
+ }
+#ifndef IN_RING3
+ if ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+
+ break;
+ }
+
+ /*
+ * Odd NOP accesses.
+ */
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 1:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 2:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 3:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 4:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 5:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 6:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 7:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 1:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 2:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 3:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 4:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 5:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 6:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 7:
+ return VINF_SUCCESS;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMMMIONEWREAD}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmmdevTestingMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
+{
+ RT_NOREF_PV(pvUser);
+
+ switch (off)
+ {
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ /* fall thru. */
+ case VMMDEV_TESTING_MMIO_OFF_NOP:
+ switch (cb)
+ {
+ case 8:
+ *(uint64_t *)pv = VMMDEV_TESTING_NOP_RET | ((uint64_t)VMMDEV_TESTING_NOP_RET << 32);
+ break;
+ case 4:
+ *(uint32_t *)pv = VMMDEV_TESTING_NOP_RET;
+ break;
+ case 2:
+ *(uint16_t *)pv = RT_LO_U16(VMMDEV_TESTING_NOP_RET);
+ break;
+ case 1:
+ *(uint8_t *)pv = (uint8_t)(VMMDEV_TESTING_NOP_RET & UINT8_MAX);
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_5;
+ }
+ return VINF_SUCCESS;
+
+
+ default:
+ {
+ /*
+ * Readback register (64 bytes wide).
+ */
+ if ( ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK + 64)
+#ifndef IN_RING3
+ || ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+#endif
+ )
+ {
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ off &= 0x3f;
+ switch (cb)
+ {
+ case 8: *(uint64_t *)pv = *(uint64_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 4: *(uint32_t *)pv = *(uint32_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 2: *(uint16_t *)pv = *(uint16_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 1: *(uint8_t *)pv = *(uint8_t const *)&pThis->TestingData.abReadBack[off]; break;
+ default: memcpy(pv, &pThis->TestingData.abReadBack[off], cb); break;
+ }
+ return VINF_SUCCESS;
+ }
+#ifndef IN_RING3
+ if ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ break;
+ }
+
+ /*
+ * Odd NOP accesses (for 16-bit code mainly).
+ */
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 1:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 2:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 3:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 4:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 5:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 6:
+ case VMMDEV_TESTING_MMIO_OFF_NOP_R3 + 7:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 1:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 2:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 3:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 4:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 5:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 6:
+ case VMMDEV_TESTING_MMIO_OFF_NOP + 7:
+ {
+ static uint8_t const s_abNopValue[8] =
+ {
+ VMMDEV_TESTING_NOP_RET & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 8) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 16) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 24) & 0xff,
+ VMMDEV_TESTING_NOP_RET & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 8) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 16) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 24) & 0xff,
+ };
+
+ memset(pv, 0xff, cb);
+ memcpy(pv, &s_abNopValue[off & 7], RT_MIN(8 - (off & 7), cb));
+ return VINF_SUCCESS;
+ }
+ }
+
+ return VINF_IOM_MMIO_UNUSED_FF;
+}
+
+#ifdef IN_RING3
+
+/**
+ * Executes the VMMDEV_TESTING_CMD_VALUE_REG command when the data is ready.
+ *
+ * @param pDevIns The PDM device instance.
+ * @param pThis The instance VMMDev data.
+ */
+static void vmmdevTestingCmdExec_ValueReg(PPDMDEVINS pDevIns, PVMMDEV pThis)
+{
+ char *pszRegNm = strchr(pThis->TestingData.String.sz, ':');
+ if (pszRegNm)
+ {
+ *pszRegNm++ = '\0';
+ pszRegNm = RTStrStrip(pszRegNm);
+ }
+ char *pszValueNm = RTStrStrip(pThis->TestingData.String.sz);
+ size_t const cchValueNm = strlen(pszValueNm);
+ if (cchValueNm && pszRegNm && *pszRegNm)
+ {
+ VMCPUID idCpu = PDMDevHlpGetCurrentCpuId(pDevIns);
+ uint64_t u64Value;
+ int rc2 = PDMDevHlpDBGFRegNmQueryU64(pDevIns, idCpu, pszRegNm, &u64Value);
+ if (RT_SUCCESS(rc2))
+ {
+ const char *pszWarn = rc2 == VINF_DBGF_TRUNCATED_REGISTER ? " truncated" : "";
+#if 1 /*!RTTestValue format*/
+ char szFormat[128], szValue[128];
+ RTStrPrintf(szFormat, sizeof(szFormat), "%%VR{%s}", pszRegNm);
+ rc2 = PDMDevHlpDBGFRegPrintf(pDevIns, idCpu, szValue, sizeof(szValue), szFormat);
+ if (RT_SUCCESS(rc2))
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %16s {reg=%s}%s\n",
+ pszValueNm,
+ (ssize_t)cchValueNm - 12 > 48 ? 0 : 48 - ((ssize_t)cchValueNm - 12), "",
+ szValue, pszRegNm, pszWarn));
+ else
+#endif
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %'9llu (%#llx) [0] {reg=%s}%s\n",
+ pszValueNm,
+ (ssize_t)cchValueNm - 12 > 48 ? 0 : 48 - ((ssize_t)cchValueNm - 12), "",
+ u64Value, u64Value, pszRegNm, pszWarn));
+ }
+ else
+ VMMDEV_TESTING_OUTPUT(("testing: error querying register '%s' for value '%s': %Rrc\n",
+ pszRegNm, pszValueNm, rc2));
+ }
+ else
+ VMMDEV_TESTING_OUTPUT(("testing: malformed register value '%s'/'%s'\n", pszValueNm, pszRegNm));
+}
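+/* Illustrative guest-side sketch (the names and the outl/outb helpers are made
+ * up, this is not taken from the Guest Additions): after writing
+ * VMMDEV_TESTING_CMD_VALUE_REG to the command port, the guest streams the
+ * zero-terminated string "<value name>:<register name>" byte by byte to the
+ * data port; the terminating zero triggers the handler above.
+ *
+ *     outl(VMMDEV_TESTING_IOPORT_CMD, VMMDEV_TESTING_CMD_VALUE_REG);
+ *     for (const char *psz = "tsc-at-halt:tsc"; ; psz++)
+ *     {
+ *         outb(VMMDEV_TESTING_IOPORT_DATA, (uint8_t)*psz);
+ *         if (!*psz)
+ *             break;
+ *     }
+ */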
+
+#endif /* IN_RING3 */
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevTestingIoWrite(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+#ifdef IN_RING3
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+#endif
+ RT_NOREF_PV(pvUser);
+
+ switch (offPort)
+ {
+ /*
+ * The NOP I/O ports are used for performance measurements.
+ */
+ case VMMDEV_TESTING_IOPORT_NOP - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+ return VINF_SUCCESS;
+
+ case VMMDEV_TESTING_IOPORT_NOP_R3 - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+#ifndef IN_RING3
+ return VINF_IOM_R3_IOPORT_WRITE;
+#else
+ return VINF_SUCCESS;
+#endif
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ /* The timestamp I/O ports are read-only. */
+ case VMMDEV_TESTING_IOPORT_TS_LOW - VMMDEV_TESTING_IOPORT_BASE:
+ case VMMDEV_TESTING_IOPORT_TS_HIGH - VMMDEV_TESTING_IOPORT_BASE:
+ break;
+
+ /*
+ * The command port (DWORD and WORD write only).
+ * (We have to allow WORD writes for 286, 186 and 8086 execution modes.)
+ */
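+ /* Illustrative: a real-mode guest may do a 16-bit OUT of just the low word
+ * of a VMMDEV_TESTING_CMD_XXX value; the code below then ORs in
+ * VMMDEV_TESTING_CMD_MAGIC_HI_WORD to reconstruct the full 32-bit command. */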
+ case VMMDEV_TESTING_IOPORT_CMD - VMMDEV_TESTING_IOPORT_BASE:
+ if (cb == 2)
+ {
+ u32 |= VMMDEV_TESTING_CMD_MAGIC_HI_WORD;
+ cb = 4;
+ }
+ if (cb == 4)
+ {
+ pThis->u32TestingCmd = u32;
+ pThis->offTestingData = 0;
+ pThis->cbReadableTestingData = 0;
+ RT_ZERO(pThis->TestingData);
+ return VINF_SUCCESS;
+ }
+ break;
+
+ /*
+ * The data port. Used for providing data for a command.
+ */
+ case VMMDEV_TESTING_IOPORT_DATA - VMMDEV_TESTING_IOPORT_BASE:
+ {
+ uint32_t uCmd = pThis->u32TestingCmd;
+ uint32_t off = pThis->offTestingData;
+ switch (uCmd)
+ {
+ case VMMDEV_TESTING_CMD_INIT:
+ case VMMDEV_TESTING_CMD_SUB_NEW:
+ case VMMDEV_TESTING_CMD_FAILED:
+ case VMMDEV_TESTING_CMD_SKIPPED:
+ case VMMDEV_TESTING_CMD_PRINT:
+ if ( off < sizeof(pThis->TestingData.String.sz) - 1
+ && cb == 1)
+ {
+ if (u32)
+ {
+ pThis->TestingData.String.sz[off] = u32;
+ pThis->offTestingData = off + 1;
+ }
+ else
+ {
+#ifdef IN_RING3
+ pThis->TestingData.String.sz[off] = '\0';
+ switch (uCmd)
+ {
+ case VMMDEV_TESTING_CMD_INIT:
+ VMMDEV_TESTING_OUTPUT(("testing: INIT '%s'\n", pThis->TestingData.String.sz));
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ {
+ RTTestChangeName(pThisCC->hTestingTest, pThis->TestingData.String.sz);
+ RTTestBanner(pThisCC->hTestingTest);
+ }
+ break;
+ case VMMDEV_TESTING_CMD_SUB_NEW:
+ VMMDEV_TESTING_OUTPUT(("testing: SUB_NEW '%s'\n", pThis->TestingData.String.sz));
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ RTTestSub(pThisCC->hTestingTest, pThis->TestingData.String.sz);
+ break;
+ case VMMDEV_TESTING_CMD_FAILED:
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ RTTestFailed(pThisCC->hTestingTest, "%s", pThis->TestingData.String.sz);
+ VMMDEV_TESTING_OUTPUT(("testing: FAILED '%s'\n", pThis->TestingData.String.sz));
+ break;
+ case VMMDEV_TESTING_CMD_SKIPPED:
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ {
+ if (off)
+ RTTestSkipped(pThisCC->hTestingTest, "%s", pThis->TestingData.String.sz);
+ else
+ RTTestSkipped(pThisCC->hTestingTest, NULL);
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: SKIPPED '%s'\n", pThis->TestingData.String.sz));
+ break;
+ case VMMDEV_TESTING_CMD_PRINT:
+ if (pThisCC->hTestingTest != NIL_RTTEST && off)
+ RTTestPrintf(pThisCC->hTestingTest, RTTESTLVL_ALWAYS, "%s", pThis->TestingData.String.sz);
+ VMMDEV_TESTING_OUTPUT(("testing: '%s'\n", pThis->TestingData.String.sz));
+ break;
+ }
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case VMMDEV_TESTING_CMD_TERM:
+ case VMMDEV_TESTING_CMD_SUB_DONE:
+ if (cb == 2)
+ {
+ if (off == 0)
+ {
+ pThis->TestingData.Error.c = u32;
+ pThis->offTestingData = 2;
+ break;
+ }
+ if (off == 2)
+ {
+ u32 <<= 16;
+ u32 |= pThis->TestingData.Error.c & UINT16_MAX;
+ cb = 4;
+ off = 0;
+ }
+ else
+ break;
+ }
+
+ if ( off == 0
+ && cb == 4)
+ {
+#ifdef IN_RING3
+ pThis->TestingData.Error.c = u32;
+ if (uCmd == VMMDEV_TESTING_CMD_TERM)
+ {
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ {
+ while (RTTestErrorCount(pThisCC->hTestingTest) < u32)
+ RTTestErrorInc(pThisCC->hTestingTest); /* A bit stupid, but does the trick. */
+ RTTestSubDone(pThisCC->hTestingTest);
+ RTTestSummaryAndDestroy(pThisCC->hTestingTest);
+ pThisCC->hTestingTest = NIL_RTTEST;
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: TERM - %u errors\n", u32));
+ }
+ else
+ {
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ {
+ while (RTTestSubErrorCount(pThisCC->hTestingTest) < u32)
+ RTTestErrorInc(pThisCC->hTestingTest); /* A bit stupid, but does the trick. */
+ RTTestSubDone(pThisCC->hTestingTest);
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: SUB_DONE - %u errors\n", u32));
+ }
+ return VINF_SUCCESS;
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ break;
+
+ case VMMDEV_TESTING_CMD_VALUE:
+ if (cb == 4)
+ {
+ if (off == 0)
+ pThis->TestingData.Value.u64Value.s.Lo = u32;
+ else if (off == 4)
+ pThis->TestingData.Value.u64Value.s.Hi = u32;
+ else if (off == 8)
+ pThis->TestingData.Value.u32Unit = u32;
+ else
+ break;
+ pThis->offTestingData = off + 4;
+ return VINF_SUCCESS;
+ }
+ if (cb == 2)
+ {
+ if (off == 0)
+ pThis->TestingData.Value.u64Value.Words.w0 = (uint16_t)u32;
+ else if (off == 2)
+ pThis->TestingData.Value.u64Value.Words.w1 = (uint16_t)u32;
+ else if (off == 4)
+ pThis->TestingData.Value.u64Value.Words.w2 = (uint16_t)u32;
+ else if (off == 6)
+ pThis->TestingData.Value.u64Value.Words.w3 = (uint16_t)u32;
+ else if (off == 8)
+ pThis->TestingData.Value.u32Unit = (uint16_t)u32;
+ else if (off == 10)
+ pThis->TestingData.Value.u32Unit |= u32 << 16;
+ else
+ break;
+ pThis->offTestingData = off + 2;
+ return VINF_SUCCESS;
+ }
+
+ if ( off >= 12
+ && cb == 1
+ && off - 12 < sizeof(pThis->TestingData.Value.szName) - 1)
+ {
+ if (u32)
+ {
+ pThis->TestingData.Value.szName[off - 12] = u32;
+ pThis->offTestingData = off + 1;
+ }
+ else
+ {
+#ifdef IN_RING3
+ pThis->TestingData.Value.szName[off - 12] = '\0';
+
+ RTTESTUNIT enmUnit = (RTTESTUNIT)pThis->TestingData.Value.u32Unit;
+ if (enmUnit <= RTTESTUNIT_INVALID || enmUnit >= RTTESTUNIT_END)
+ {
+ VMMDEV_TESTING_OUTPUT(("Invalid log value unit %#x\n", pThis->TestingData.Value.u32Unit));
+ enmUnit = RTTESTUNIT_NONE;
+ }
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ RTTestValue(pThisCC->hTestingTest, pThis->TestingData.Value.szName,
+ pThis->TestingData.Value.u64Value.u, enmUnit);
+
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %'9llu (%#llx) [%u]\n",
+ pThis->TestingData.Value.szName,
+ off - 12 > 48 ? 0 : 48 - (off - 12), "",
+ pThis->TestingData.Value.u64Value.u, pThis->TestingData.Value.u64Value.u,
+ pThis->TestingData.Value.u32Unit));
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ return VINF_SUCCESS;
+ }
+ break;
+
+
+ /*
+ * RTTestValue with the output from DBGFR3RegNmQuery.
+ */
+ case VMMDEV_TESTING_CMD_VALUE_REG:
+ {
+ if ( off < sizeof(pThis->TestingData.String.sz) - 1
+ && cb == 1)
+ {
+ pThis->TestingData.String.sz[off] = u32;
+ if (u32)
+ pThis->offTestingData = off + 1;
+ else
+#ifdef IN_RING3
+ vmmdevTestingCmdExec_ValueReg(pDevIns, pThis);
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ return VINF_SUCCESS;
+ }
+ break;
+ }
+
+ /*
+ * Query configuration.
+ */
+ case VMMDEV_TESTING_CMD_QUERY_CFG:
+ {
+ switch (u32)
+ {
+ case VMMDEV_TESTING_CFG_DWORD0:
+ case VMMDEV_TESTING_CFG_DWORD1:
+ case VMMDEV_TESTING_CFG_DWORD2:
+ case VMMDEV_TESTING_CFG_DWORD3:
+ case VMMDEV_TESTING_CFG_DWORD4:
+ case VMMDEV_TESTING_CFG_DWORD5:
+ case VMMDEV_TESTING_CFG_DWORD6:
+ case VMMDEV_TESTING_CFG_DWORD7:
+ case VMMDEV_TESTING_CFG_DWORD8:
+ case VMMDEV_TESTING_CFG_DWORD9:
+ pThis->cbReadableTestingData = sizeof(pThis->TestingData.u32);
+ pThis->TestingData.u32 = pThis->au32TestingCfgDwords[u32 - VMMDEV_TESTING_CFG_DWORD0];
+ break;
+
+ case VMMDEV_TESTING_CFG_IS_NEM_LINUX:
+ case VMMDEV_TESTING_CFG_IS_NEM_WINDOWS:
+ case VMMDEV_TESTING_CFG_IS_NEM_DARWIN:
+ {
+ pThis->cbReadableTestingData = sizeof(pThis->TestingData.b);
+#if defined(RT_OS_DARWIN)
+ pThis->TestingData.b = u32 == VMMDEV_TESTING_CFG_IS_NEM_DARWIN
+ && PDMDevHlpGetMainExecutionEngine(pDevIns) == VM_EXEC_ENGINE_NATIVE_API;
+#elif defined(RT_OS_LINUX)
+ pThis->TestingData.b = u32 == VMMDEV_TESTING_CFG_IS_NEM_LINUX
+ && PDMDevHlpGetMainExecutionEngine(pDevIns) == VM_EXEC_ENGINE_NATIVE_API;
+#elif defined(RT_OS_WINDOWS)
+ pThis->TestingData.b = u32 == VMMDEV_TESTING_CFG_IS_NEM_WINDOWS
+ && PDMDevHlpGetMainExecutionEngine(pDevIns) == VM_EXEC_ENGINE_NATIVE_API;
+#else
+ pThis->TestingData.b = false;
+#endif
+ break;
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ Log(("VMMDEV_TESTING_IOPORT_CMD: bad access; cmd=%#x off=%#x cb=%#x u32=%#x\n", uCmd, off, cb, u32));
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * Configure the locking contention test.
+ */
+ case VMMDEV_TESTING_IOPORT_LOCKED_HI - VMMDEV_TESTING_IOPORT_BASE:
+ case VMMDEV_TESTING_IOPORT_LOCKED_LO - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ {
+ bool const fReadWriteSection = pThis->TestingLockControl.s.fReadWriteSection;
+ int rc;
+#ifndef IN_RING3
+ if (!pThis->TestingLockControl.s.fMustSucceed)
+ {
+ if (!fReadWriteSection)
+ rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_WRITE);
+ else
+ rc = PDMDevHlpCritSectRwEnterExcl(pDevIns, &pThis->CritSectRw, VINF_IOM_R3_IOPORT_WRITE);
+ if (rc != VINF_SUCCESS)
+ return rc;
+ }
+ else
+#endif
+ {
+ if (!fReadWriteSection)
+ rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_SUCCESS);
+ else
+ rc = PDMDevHlpCritSectRwEnterExcl(pDevIns, &pThis->CritSectRw, VINF_SUCCESS);
+ AssertRCReturn(rc, rc);
+ }
+
+ if (offPort == VMMDEV_TESTING_IOPORT_LOCKED_LO - VMMDEV_TESTING_IOPORT_BASE)
+ {
+ if (pThis->TestingLockControl.au32[0] != u32)
+ {
+ pThis->TestingLockControl.au32[0] = u32;
+ PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hTestingLockEvt);
+ }
+ }
+ else
+ {
+ u32 &= ~VMMDEV_TESTING_LOCKED_HI_MBZ_MASK;
+ if (pThis->TestingLockControl.au32[1] != u32)
+ {
+ pThis->TestingLockControl.au32[1] = u32;
+ PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hTestingLockEvt);
+ }
+ }
+
+ if (!fReadWriteSection)
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ else
+ PDMDevHlpCritSectRwLeaveExcl(pDevIns, &pThis->CritSectRw);
+ return VINF_SUCCESS;
+ }
+
+ case 2:
+ case 1:
+ ASSERT_GUEST_FAILED();
+ break;
+
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ default:
+ break;
+ }
+
+ return VERR_IOM_IOPORT_UNUSED;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWIN}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevTestingIoRead(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ RT_NOREF_PV(pvUser);
+
+ switch (offPort)
+ {
+ /*
+ * The NOP I/O ports are used for performance measurements.
+ */
+ case VMMDEV_TESTING_IOPORT_NOP - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+ *pu32 = VMMDEV_TESTING_NOP_RET;
+ return VINF_SUCCESS;
+
+ case VMMDEV_TESTING_IOPORT_NOP_R3 - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+#ifndef IN_RING3
+ return VINF_IOM_R3_IOPORT_READ;
+#else
+ *pu32 = VMMDEV_TESTING_NOP_RET;
+ return VINF_SUCCESS;
+#endif
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ /*
+ * The timestamp I/O ports are used for getting a good fix on the current
+ * time (as seen by the host).
+ *
+ * The high dword is latched when the low dword is read, so reading low
+ * followed by high yields a consistent 64-bit timestamp value.
+ */
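+ /* Guest-side sketch (inl() is an assumed port-read helper, illustration only):
+ *     uint32_t uLo = inl(VMMDEV_TESTING_IOPORT_TS_LOW);  // latches high dword
+ *     uint32_t uHi = inl(VMMDEV_TESTING_IOPORT_TS_HIGH);
+ *     uint64_t uNanoTS = ((uint64_t)uHi << 32) | uLo;
+ */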
+ case VMMDEV_TESTING_IOPORT_TS_LOW - VMMDEV_TESTING_IOPORT_BASE:
+ if (cb == 4)
+ {
+ uint64_t NowTS = RTTimeNanoTS();
+ *pu32 = (uint32_t)NowTS;
+ pThis->u32TestingHighTimestamp = (uint32_t)(NowTS >> 32);
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case VMMDEV_TESTING_IOPORT_TS_HIGH - VMMDEV_TESTING_IOPORT_BASE:
+ if (cb == 4)
+ {
+ *pu32 = pThis->u32TestingHighTimestamp;
+ return VINF_SUCCESS;
+ }
+ break;
+
+ /*
+ * Just return the current locking configuration value after first
+ * acquiring the lock of course.
+ */
+ case VMMDEV_TESTING_IOPORT_LOCKED_LO - VMMDEV_TESTING_IOPORT_BASE:
+ case VMMDEV_TESTING_IOPORT_LOCKED_HI - VMMDEV_TESTING_IOPORT_BASE:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+ {
+ /*
+ * Check the configuration and enter the designated critical
+ * section in the specified fashion.
+ */
+ bool const fReadWriteSection = pThis->TestingLockControl.s.fReadWriteSection;
+ bool const fEmtShared = pThis->TestingLockControl.s.fEmtShared;
+ int rc;
+#ifndef IN_RING3
+ if (!pThis->TestingLockControl.s.fMustSucceed)
+ {
+ if (!fReadWriteSection)
+ rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
+ else if (!fEmtShared)
+ rc = PDMDevHlpCritSectRwEnterExcl(pDevIns, &pThis->CritSectRw, VINF_IOM_R3_IOPORT_READ);
+ else
+ rc = PDMDevHlpCritSectRwEnterShared(pDevIns, &pThis->CritSectRw, VINF_IOM_R3_IOPORT_READ);
+ if (rc != VINF_SUCCESS)
+ return rc;
+ }
+ else
+#endif
+ {
+ if (!fReadWriteSection)
+ rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_SUCCESS);
+ else if (!fEmtShared)
+ rc = PDMDevHlpCritSectRwEnterExcl(pDevIns, &pThis->CritSectRw, VINF_SUCCESS);
+ else
+ rc = PDMDevHlpCritSectRwEnterShared(pDevIns, &pThis->CritSectRw, VINF_SUCCESS);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Grab return value and, if requested, hold for a while.
+ */
+ *pu32 = pThis->TestingLockControl.au32[ offPort
+ - (VMMDEV_TESTING_IOPORT_LOCKED_LO - VMMDEV_TESTING_IOPORT_BASE)];
+ uint64_t cTicks = (uint64_t)pThis->TestingLockControl.s.cKiloTicksEmtHold * _1K;
+ if (cTicks)
+ {
+ uint64_t const uStartTick = ASMReadTSC();
+ do
+ {
+ ASMNopPause();
+ ASMNopPause();
+ } while (ASMReadTSC() - uStartTick < cTicks);
+ }
+
+ /*
+ * Leave.
+ */
+ if (!fReadWriteSection)
+ PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ else if (!fEmtShared)
+ PDMDevHlpCritSectRwLeaveExcl(pDevIns, &pThis->CritSectRw);
+ else
+ PDMDevHlpCritSectRwLeaveShared(pDevIns, &pThis->CritSectRw);
+ return VINF_SUCCESS;
+ }
+
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ /*
+ * The command register is write-only.
+ */
+ case VMMDEV_TESTING_IOPORT_CMD - VMMDEV_TESTING_IOPORT_BASE:
+ break;
+
+ /*
+ * The data register is only readable after a query command, otherwise it
+ * behaves as an undefined port. Return zeros if the guest reads too much.
+ */
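+ /* Illustrative query flow (a sketch): the guest writes
+ * VMMDEV_TESTING_CMD_QUERY_CFG to the command port, then writes e.g.
+ * VMMDEV_TESTING_CFG_DWORD0 to the data port to select the item, and
+ * finally reads the data port to fetch the response bytes prepared by
+ * the command handler (see vmmdevTestingIoWrite above). */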
+ case VMMDEV_TESTING_IOPORT_DATA - VMMDEV_TESTING_IOPORT_BASE:
+ if (pThis->cbReadableTestingData > 0)
+ {
+ if (pThis->offTestingData < pThis->cbReadableTestingData)
+ {
+ switch (RT_MIN(cb, pThis->cbReadableTestingData - pThis->offTestingData))
+ {
+ case 1:
+ *pu32 = pThis->TestingData.ab[pThis->offTestingData++];
+ break;
+ case 2:
+ *pu32 = pThis->TestingData.ab[pThis->offTestingData]
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 1] << 8);
+ pThis->offTestingData += 2;
+ break;
+ case 3:
+ *pu32 = pThis->TestingData.ab[pThis->offTestingData]
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 1] << 8)
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 2] << 16);
+ pThis->offTestingData += 3;
+ break;
+ case 4:
+ *pu32 = pThis->TestingData.ab[pThis->offTestingData]
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 1] << 8)
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 2] << 16)
+ | ((uint32_t)pThis->TestingData.ab[pThis->offTestingData + 3] << 24);
+ pThis->offTestingData += 4;
+ break;
+ }
+ }
+ else
+ *pu32 = 0;
+ return VINF_SUCCESS;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return VERR_IOM_IOPORT_UNUSED;
+}
+
+#ifdef IN_RING3
+
+/**
+ * @callback_method_impl{FNPDMTHREADDEV}
+ */
+static DECLCALLBACK(int) vmmdevR3TestingLockingThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ AssertPtr(pVM);
+
+ while (RT_LIKELY(pThread->enmState == PDMTHREADSTATE_RUNNING))
+ {
+ int rc;
+ uint32_t cNsNextWait = 0;
+ uint32_t const fCfgHi = pThis->TestingLockControl.au32[1];
+ if (fCfgHi & VMMDEV_TESTING_LOCKED_HI_ENABLED)
+ {
+ /*
+ * Take lock.
+ */
+ if (!(fCfgHi & VMMDEV_TESTING_LOCKED_HI_TYPE_RW))
+ rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_SUCCESS);
+ else if (!(fCfgHi & VMMDEV_TESTING_LOCKED_HI_THREAD_SHARED))
+ rc = PDMDevHlpCritSectRwEnterExcl(pDevIns, &pThis->CritSectRw, VINF_SUCCESS);
+ else
+ rc = PDMDevHlpCritSectRwEnterShared(pDevIns, &pThis->CritSectRw, VINF_SUCCESS);
+ AssertLogRelRCReturn(rc, rc);
+
+ /*
+ * Delay releasing lock.
+ */
+ cNsNextWait = pThis->TestingLockControl.s.cUsBetween * RT_NS_1US;
+ if (pThis->TestingLockControl.s.cUsHold)
+ {
+ PDMDevHlpSUPSemEventWaitNsRelIntr(pDevIns, pThis->hTestingLockEvt, pThis->TestingLockControl.s.cUsHold);
+ if (pThis->TestingLockControl.s.fPokeBeforeRelease)
+ VMCC_FOR_EACH_VMCPU_STMT(pVM, RTThreadPoke(pVCpu->hThread));
+ }
+
+ /*
+ * Release lock.
+ */
+ if (!(fCfgHi & VMMDEV_TESTING_LOCKED_HI_TYPE_RW))
+ rc = PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+ else if (!(fCfgHi & VMMDEV_TESTING_LOCKED_HI_THREAD_SHARED))
+ rc = PDMDevHlpCritSectRwLeaveExcl(pDevIns, &pThis->CritSectRw);
+ else
+ rc = PDMDevHlpCritSectRwLeaveShared(pDevIns, &pThis->CritSectRw);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ /*
+ * Wait for the next iteration.
+ */
+ if (RT_LIKELY(pThread->enmState == PDMTHREADSTATE_RUNNING))
+ { /* likely */ }
+ else
+ break;
+ if (cNsNextWait > 0)
+ PDMDevHlpSUPSemEventWaitNsRelIntr(pDevIns, pThis->hTestingLockEvt, cNsNextWait);
+ else
+ PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hTestingLockEvt, RT_INDEFINITE_WAIT);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNPDMTHREADWAKEUPDEV}
+ */
+static DECLCALLBACK(int) vmmdevR3TestingLockingThreadWakeup(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ RT_NOREF(pThread);
+ return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hTestingLockEvt);
+}
+
+
+/**
+ * Terminates the testing part of the VMMDev, if enabled.
+ *
+ * @param pDevIns The VMMDev device instance.
+ */
+void vmmdevR3TestingTerminate(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ if (!pThis->fTestingEnabled)
+ return;
+
+ if (pThisCC->hTestingTest != NIL_RTTEST)
+ {
+ RTTestFailed(pThisCC->hTestingTest, "Still open at vmmdev destruction.");
+ RTTestSummaryAndDestroy(pThisCC->hTestingTest);
+ pThisCC->hTestingTest = NIL_RTTEST;
+ }
+}
+
+
+/**
+ * Initializes the testing part of the VMMDev if enabled.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The VMMDev device instance.
+ */
+int vmmdevR3TestingInitialize(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+ int rc;
+
+ if (!pThis->fTestingEnabled)
+ return VINF_SUCCESS;
+
+ if (pThis->fTestingMMIO)
+ {
+ /*
+ * Register a chunk of MMIO memory that we'll use for various
+ * test interfaces. Optional, needs to be explicitly enabled.
+ */
+ rc = PDMDevHlpMmioCreateAndMap(pDevIns, VMMDEV_TESTING_MMIO_BASE, VMMDEV_TESTING_MMIO_SIZE,
+ vmmdevTestingMmioWrite, vmmdevTestingMmioRead,
+ IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
+ "VMMDev Testing", &pThis->hMmioTesting);
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+ * Register the I/O ports used for testing.
+ */
+ rc = PDMDevHlpIoPortCreateAndMap(pDevIns, VMMDEV_TESTING_IOPORT_BASE, VMMDEV_TESTING_IOPORT_COUNT,
+ vmmdevTestingIoWrite, vmmdevTestingIoRead, "VMMDev Testing", NULL /*paExtDescs*/,
+ &pThis->hIoPortTesting);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Initialize the read/write critical section used for the locking tests.
+ */
+ rc = PDMDevHlpCritSectRwInit(pDevIns, &pThis->CritSectRw, RT_SRC_POS, "VMMLockRW");
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create the locking thread.
+ */
+ rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hTestingLockEvt);
+ AssertRCReturn(rc, rc);
+ rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->pTestingLockThread, NULL /*pvUser*/, vmmdevR3TestingLockingThread,
+ vmmdevR3TestingLockingThreadWakeup, 0 /*cbStack*/, RTTHREADTYPE_IO, "VMMLockT");
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Open the XML output file (/pipe/whatever) if specified.
+ */
+ rc = RTTestCreateEx("VMMDevTesting", RTTEST_C_USE_ENV | RTTEST_C_NO_TLS | RTTEST_C_XML_DELAY_TOP_TEST,
+ RTTESTLVL_DEBUG, -1 /*iNativeTestPipe*/, pThisCC->pszTestingXmlOutput, &pThisCC->hTestingTest);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, "Error creating testing instance");
+
+ return VINF_SUCCESS;
+}
+
+#else /* !IN_RING3 */
+
+/**
+ * Does the ring-0/raw-mode initialization of the testing part if enabled.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The VMMDev device instance.
+ */
+int vmmdevRZTestingInitialize(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+ int rc;
+
+ if (!pThis->fTestingEnabled)
+ return VINF_SUCCESS;
+
+ if (pThis->fTestingMMIO)
+ {
+ rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioTesting, vmmdevTestingMmioWrite, vmmdevTestingMmioRead, NULL);
+ AssertRCReturn(rc, rc);
+ }
+
+ rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortTesting, vmmdevTestingIoWrite, vmmdevTestingIoRead, NULL);
+ AssertRCReturn(rc, rc);
+
+ return VINF_SUCCESS;
+}
+
+#endif /* !IN_RING3 */
+#endif /* !VBOX_WITHOUT_TESTING_FEATURES */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevTesting.h b/src/VBox/Devices/VMMDev/VMMDevTesting.h
new file mode 100644
index 00000000..40c3ab29
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevTesting.h
@@ -0,0 +1,46 @@
+/* $Id: VMMDevTesting.h $ */
+/** @file
+ * VMMDev - Guest <-> VMM/Host communication device, internal header.
+ */
+
+/*
+ * Copyright (C) 2010-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/types.h>
+#include <VBox/VMMDevTesting.h>
+
+RT_C_DECLS_BEGIN
+
+int vmmdevR3TestingInitialize(PPDMDEVINS pDevIns);
+void vmmdevR3TestingTerminate(PPDMDEVINS pDevIns);
+int vmmdevRZTestingInitialize(PPDMDEVINS pDevIns);
+
+RT_C_DECLS_END
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h */
+