Diffstat (limited to 'src/VBox/Devices/VMMDev')
-rw-r--r--  src/VBox/Devices/VMMDev/Makefile.kup          0
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDev.cpp         4806
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevHGCM.cpp     2426
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevHGCM.h         49
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevState.h       456
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevTesting.cpp   776
-rw-r--r--  src/VBox/Devices/VMMDev/VMMDevTesting.h      35
7 files changed, 8548 insertions, 0 deletions
diff --git a/src/VBox/Devices/VMMDev/Makefile.kup b/src/VBox/Devices/VMMDev/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/Makefile.kup
diff --git a/src/VBox/Devices/VMMDev/VMMDev.cpp b/src/VBox/Devices/VMMDev/VMMDev.cpp
new file mode 100644
index 00000000..25dd6ddb
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDev.cpp
@@ -0,0 +1,4806 @@
+/* $Id: VMMDev.cpp $ */
+/** @file
+ * VMMDev - Guest <-> VMM/Host communication device.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/** @page pg_vmmdev The VMM Device.
+ *
+ * The VMM device is a custom hardware device emulation for communicating with
+ * the guest additions.
+ *
+ * Whenever the host wants to inform the guest about something, an IRQ
+ * notification will be raised.
+ *
+ * The VMMDev PDM interface contains the guest notification method.
+ *
+ * There is a 32-bit event mask which will be read by the guest on an
+ * interrupt. A non-zero bit in the mask means that the specific event occurred
+ * and requires processing on the guest side.
+ *
+ * After reading the event mask, the guest must issue the generic request
+ * VMMDevReq_AcknowledgeEvents.
+ *
+ * The IRQ line is set to 1 (request) if there are unprocessed events, that is,
+ * if the event mask is not zero.
+ *
+ * After receiving an interrupt and checking the event mask, the guest must
+ * process events using the event-specific mechanism.
+ *
+ * For example, if the mouse capabilities were changed, the guest will use the
+ * VMMDevReq_GetMouseStatus generic request.
+ *
+ * The event mask is only a set of flags indicating that the guest must proceed
+ * with the corresponding procedure.
+ *
+ * Unsupported events are therefore ignored. The Guest Additions must inform
+ * the host which events they want to receive, to avoid unnecessary IRQ
+ * processing. By default no events are signalled to the guest.
+ *
+ * This is a reasonably fast method: it requires only one context switch per
+ * event notification.
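+ *
+ * As a rough illustration only (simplified pseudo-code, not the actual guest
+ * driver; vmmdevSubmitRequest() and vmmdevQueryMouseStatus() are made-up
+ * helper names), the guest side of this protocol looks something like:
+ * @code
+ *     // Guest interrupt handler, with the VMMDev RAM page mapped.
+ *     if (pVMMDevRAM->V.V1_04.fHaveEvents)
+ *     {
+ *         // Ask the host which events are pending; this also lowers the IRQ.
+ *         pAckReq->header.requestType = VMMDevReq_AcknowledgeEvents;
+ *         vmmdevSubmitRequest(pAckReq);            // hypothetical helper
+ *
+ *         // Dispatch on the returned event mask.
+ *         if (pAckReq->events & VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED)
+ *             vmmdevQueryMouseStatus();            // VMMDevReq_GetMouseStatus
+ *     }
+ * @endcode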
+ *
+ *
+ * @section sec_vmmdev_heartbeat Heartbeat
+ *
+ * The heartbeat is a feature to monitor whether the guest OS is hung or not.
+ *
+ * The main kernel component of the guest additions, VBoxGuest, sets up a timer
+ * with the interval returned by VMMDevReq_HeartbeatConfigure
+ * (VMMDevReqHeartbeat::cNsInterval, VMMDEV::cNsHeartbeatInterval) and performs
+ * a VMMDevReq_GuestHeartbeat request every time the timer ticks.
+ *
+ * The host side (VMMDev) arms a timer with a more distant deadline
+ * (VMMDEV::cNsHeartbeatTimeout), twice cNsHeartbeatInterval by default. Each
+ * time a VMMDevReq_GuestHeartbeat request comes in, the timer is rearmed with
+ * the same relative deadline. So, as long as the VMMDevReq_GuestHeartbeat
+ * requests come when they should, the host timer will never fire.
+ *
+ * When the timer fires, we consider the guest as hung / flatlined / dead.
+ * Currently we only LogRel that, but it's easy to extend this with an event in
+ * the Main API.
+ *
+ * Should the guest reawaken at some later point, we LogRel that event and
+ * continue as normal. Again, this is something which would merit an API event.
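+ *
+ * To illustrate the timing with the default values (the actual interval comes
+ * from CFGM or VMMDevReq_HeartbeatConfigure):
+ * @code
+ *     cNsHeartbeatInterval = 2 s   (VMMDEV_HEARTBEAT_DEFAULT_INTERVAL)
+ *     cNsHeartbeatTimeout  = 2 * cNsHeartbeatInterval = 4 s
+ *     // The guest sends VMMDevReq_GuestHeartbeat every 2 s; each request
+ *     // rearms the flatline timer 4 s into the future, so it only fires
+ *     // after roughly two consecutive heartbeats have been missed.
+ * @endcode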
+ *
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+/* Enable dev_vmm Log3 statements to get IRQ-related logging. */
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <VBox/AssertGuest.h>
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/log.h>
+#include <VBox/param.h>
+#include <iprt/path.h>
+#include <iprt/dir.h>
+#include <iprt/file.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/err.h>
+#include <VBox/vmm/vm.h> /* for VM_IS_EMT */
+#include <VBox/dbg.h>
+#include <VBox/version.h>
+
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/buildconfig.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#ifndef IN_RC
+# include <iprt/mem.h>
+#endif
+#ifdef IN_RING3
+# include <iprt/uuid.h>
+#endif
+
+#include "VMMDevState.h"
+#ifdef VBOX_WITH_HGCM
+# include "VMMDevHGCM.h"
+#endif
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+# include "VMMDevTesting.h"
+#endif
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define VMMDEV_INTERFACE_VERSION_IS_1_03(s) \
+ ( RT_HIWORD((s)->guestInfo.interfaceVersion) == 1 \
+ && RT_LOWORD((s)->guestInfo.interfaceVersion) == 3 )
+
+#define VMMDEV_INTERFACE_VERSION_IS_OK(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) <= RT_LOWORD(VMMDEV_VERSION) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_OLD(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) < RT_HIWORD(VMMDEV_VERSION) \
+ || ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) <= RT_LOWORD(VMMDEV_VERSION) ) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_TOO_OLD(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) < RT_HIWORD(VMMDEV_VERSION) )
+
+#define VMMDEV_INTERFACE_VERSION_IS_NEW(additionsVersion) \
+ ( RT_HIWORD(additionsVersion) > RT_HIWORD(VMMDEV_VERSION) \
+ || ( RT_HIWORD(additionsVersion) == RT_HIWORD(VMMDEV_VERSION) \
+ && RT_LOWORD(additionsVersion) > RT_LOWORD(VMMDEV_VERSION) ) )
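+
+/* Example (illustrative only): assuming VMMDEV_VERSION corresponds to the 1.4
+ * interface (0x00010004), an additions interface version of 0x00010003 has
+ * RT_HIWORD == 1 and RT_LOWORD == 3, so VMMDEV_INTERFACE_VERSION_IS_OK() holds
+ * (same major word, less-or-equal minor word), whereas 0x00020000 would
+ * satisfy VMMDEV_INTERFACE_VERSION_IS_NEW(). */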
+
+/** Default interval in nanoseconds between guest heartbeats.
+ * Used when no HeartbeatInterval is set in CFGM and for setting the
+ * heartbeat check timer if the guest's heartbeat frequency is less than 1 Hz. */
+#define VMMDEV_HEARTBEAT_DEFAULT_INTERVAL (2U*RT_NS_1SEC_64)
+
+
+#ifndef VBOX_DEVICE_STRUCT_TESTCASE
+#ifdef IN_RING3
+
+/* -=-=-=-=- Misc Helpers -=-=-=-=- */
+
+/**
+ * Log information about the Guest Additions.
+ *
+ * @param pGuestInfo The information we've got from the Guest Additions driver.
+ */
+static void vmmdevLogGuestOsInfo(VBoxGuestInfo *pGuestInfo)
+{
+ const char *pszOs;
+ switch (pGuestInfo->osType & ~VBOXOSTYPE_x64)
+ {
+ case VBOXOSTYPE_DOS: pszOs = "DOS"; break;
+ case VBOXOSTYPE_Win31: pszOs = "Windows 3.1"; break;
+ case VBOXOSTYPE_Win9x: pszOs = "Windows 9x"; break;
+ case VBOXOSTYPE_Win95: pszOs = "Windows 95"; break;
+ case VBOXOSTYPE_Win98: pszOs = "Windows 98"; break;
+ case VBOXOSTYPE_WinMe: pszOs = "Windows Me"; break;
+ case VBOXOSTYPE_WinNT: pszOs = "Windows NT"; break;
+ case VBOXOSTYPE_WinNT3x: pszOs = "Windows NT 3.x"; break;
+ case VBOXOSTYPE_WinNT4: pszOs = "Windows NT4"; break;
+ case VBOXOSTYPE_Win2k: pszOs = "Windows 2k"; break;
+ case VBOXOSTYPE_WinXP: pszOs = "Windows XP"; break;
+ case VBOXOSTYPE_Win2k3: pszOs = "Windows 2k3"; break;
+ case VBOXOSTYPE_WinVista: pszOs = "Windows Vista"; break;
+ case VBOXOSTYPE_Win2k8: pszOs = "Windows 2k8"; break;
+ case VBOXOSTYPE_Win7: pszOs = "Windows 7"; break;
+ case VBOXOSTYPE_Win8: pszOs = "Windows 8"; break;
+ case VBOXOSTYPE_Win2k12_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 2k12"; break;
+ case VBOXOSTYPE_Win81: pszOs = "Windows 8.1"; break;
+ case VBOXOSTYPE_Win10: pszOs = "Windows 10"; break;
+ case VBOXOSTYPE_Win2k16_x64 & ~VBOXOSTYPE_x64: pszOs = "Windows 2k16"; break;
+ case VBOXOSTYPE_OS2: pszOs = "OS/2"; break;
+ case VBOXOSTYPE_OS2Warp3: pszOs = "OS/2 Warp 3"; break;
+ case VBOXOSTYPE_OS2Warp4: pszOs = "OS/2 Warp 4"; break;
+ case VBOXOSTYPE_OS2Warp45: pszOs = "OS/2 Warp 4.5"; break;
+ case VBOXOSTYPE_ECS: pszOs = "OS/2 ECS"; break;
+ case VBOXOSTYPE_OS21x: pszOs = "OS/2 2.1x"; break;
+ case VBOXOSTYPE_Linux: pszOs = "Linux"; break;
+ case VBOXOSTYPE_Linux22: pszOs = "Linux 2.2"; break;
+ case VBOXOSTYPE_Linux24: pszOs = "Linux 2.4"; break;
+ case VBOXOSTYPE_Linux26: pszOs = "Linux >= 2.6"; break;
+ case VBOXOSTYPE_ArchLinux: pszOs = "ArchLinux"; break;
+ case VBOXOSTYPE_Debian: pszOs = "Debian"; break;
+ case VBOXOSTYPE_OpenSUSE: pszOs = "openSUSE"; break;
+ case VBOXOSTYPE_FedoraCore: pszOs = "Fedora"; break;
+ case VBOXOSTYPE_Gentoo: pszOs = "Gentoo"; break;
+ case VBOXOSTYPE_Mandriva: pszOs = "Mandriva"; break;
+ case VBOXOSTYPE_RedHat: pszOs = "RedHat"; break;
+ case VBOXOSTYPE_Turbolinux: pszOs = "TurboLinux"; break;
+ case VBOXOSTYPE_Ubuntu: pszOs = "Ubuntu"; break;
+ case VBOXOSTYPE_Xandros: pszOs = "Xandros"; break;
+ case VBOXOSTYPE_Oracle: pszOs = "Oracle Linux"; break;
+ case VBOXOSTYPE_FreeBSD: pszOs = "FreeBSD"; break;
+ case VBOXOSTYPE_OpenBSD: pszOs = "OpenBSD"; break;
+ case VBOXOSTYPE_NetBSD: pszOs = "NetBSD"; break;
+ case VBOXOSTYPE_Netware: pszOs = "Netware"; break;
+ case VBOXOSTYPE_Solaris: pszOs = "Solaris"; break;
+ case VBOXOSTYPE_OpenSolaris: pszOs = "OpenSolaris"; break;
+ case VBOXOSTYPE_Solaris11_x64 & ~VBOXOSTYPE_x64: pszOs = "Solaris 11"; break;
+ case VBOXOSTYPE_MacOS: pszOs = "Mac OS X"; break;
+ case VBOXOSTYPE_MacOS106: pszOs = "Mac OS X 10.6"; break;
+ case VBOXOSTYPE_MacOS107_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.7"; break;
+ case VBOXOSTYPE_MacOS108_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.8"; break;
+ case VBOXOSTYPE_MacOS109_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.9"; break;
+ case VBOXOSTYPE_MacOS1010_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.10"; break;
+ case VBOXOSTYPE_MacOS1011_x64 & ~VBOXOSTYPE_x64: pszOs = "Mac OS X 10.11"; break;
+ case VBOXOSTYPE_MacOS1012_x64 & ~VBOXOSTYPE_x64: pszOs = "macOS 10.12"; break;
+ case VBOXOSTYPE_MacOS1013_x64 & ~VBOXOSTYPE_x64: pszOs = "macOS 10.13"; break;
+ case VBOXOSTYPE_Haiku: pszOs = "Haiku"; break;
+ default: pszOs = "unknown"; break;
+ }
+ LogRel(("VMMDev: Guest Additions information report: Interface = 0x%08X osType = 0x%08X (%s, %u-bit)\n",
+ pGuestInfo->interfaceVersion, pGuestInfo->osType, pszOs,
+ pGuestInfo->osType & VBOXOSTYPE_x64 ? 64 : 32));
+}
+
+
+/**
+ * Sets the IRQ (raise it or lower it) for 1.03 additions.
+ *
+ * @param pThis The VMMDev state.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevSetIRQ_Legacy(PVMMDEV pThis)
+{
+ if (pThis->fu32AdditionsOk)
+ {
+ /* Filter unsupported events */
+ uint32_t fEvents = pThis->u32HostEventFlags & pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask;
+
+ Log(("vmmdevSetIRQ: fEvents=%#010x, u32HostEventFlags=%#010x, u32GuestEventMask=%#010x.\n",
+ fEvents, pThis->u32HostEventFlags, pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask));
+
+ /* Move event flags to VMMDev RAM */
+ pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32HostEvents = fEvents;
+
+ uint32_t uIRQLevel = 0;
+ if (fEvents)
+ {
+ /* Clear host flags which will be delivered to guest. */
+ pThis->u32HostEventFlags &= ~fEvents;
+ Log(("vmmdevSetIRQ: u32HostEventFlags=%#010x\n", pThis->u32HostEventFlags));
+ uIRQLevel = 1;
+ }
+
+ /* Set IRQ level for pin 0 (see NoWait comment in vmmdevMaybeSetIRQ). */
+ /** @todo make IRQ pin configurable, at least a symbolic constant */
+ PDMDevHlpPCISetIrqNoWait(pThis->CTX_SUFF(pDevIns), 0, uIRQLevel);
+ Log(("vmmdevSetIRQ: IRQ set %d\n", uIRQLevel));
+ }
+ else
+ Log(("vmmdevSetIRQ: IRQ is not generated, guest has not yet reported to us.\n"));
+}
+
+
+/**
+ * Sets the IRQ if there are events to be delivered.
+ *
+ * @param pThis The VMMDev state.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevMaybeSetIRQ(PVMMDEV pThis)
+{
+ Log3(("vmmdevMaybeSetIRQ: u32HostEventFlags=%#010x, u32GuestFilterMask=%#010x.\n",
+ pThis->u32HostEventFlags, pThis->u32GuestFilterMask));
+
+ if (pThis->u32HostEventFlags & pThis->u32GuestFilterMask)
+ {
+ /*
+ * Note! No need to wait for the IRQ to be set (in case we're not lucky
+ * with the locks, etc.). It is a notification about something
+ * which has already happened.
+ */
+ pThis->pVMMDevRAMR3->V.V1_04.fHaveEvents = true;
+ PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 1);
+ Log3(("vmmdevMaybeSetIRQ: IRQ set.\n"));
+ }
+}
+
+/**
+ * Notifies the guest about new events (@a fAddEvents).
+ *
+ * @param pThis The VMMDev state.
+ * @param fAddEvents New events to add.
+ * @thread Any.
+ * @remarks Must be called owning the critical section.
+ */
+static void vmmdevNotifyGuestWorker(PVMMDEV pThis, uint32_t fAddEvents)
+{
+ Log3(("vmmdevNotifyGuestWorker: fAddEvents=%#010x.\n", fAddEvents));
+ Assert(PDMCritSectIsOwner(&pThis->CritSect));
+
+ if (!VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ Log3(("vmmdevNotifyGuestWorker: New additions detected.\n"));
+
+ if (pThis->fu32AdditionsOk)
+ {
+ const bool fHadEvents = (pThis->u32HostEventFlags & pThis->u32GuestFilterMask) != 0;
+
+ Log3(("vmmdevNotifyGuestWorker: fHadEvents=%d, u32HostEventFlags=%#010x, u32GuestFilterMask=%#010x.\n",
+ fHadEvents, pThis->u32HostEventFlags, pThis->u32GuestFilterMask));
+
+ pThis->u32HostEventFlags |= fAddEvents;
+
+ if (!fHadEvents)
+ vmmdevMaybeSetIRQ(pThis);
+ }
+ else
+ {
+ pThis->u32HostEventFlags |= fAddEvents;
+ Log(("vmmdevNotifyGuestWorker: IRQ is not generated, guest has not yet reported to us.\n"));
+ }
+ }
+ else
+ {
+ Log3(("vmmdevNotifyGuestWorker: Old additions detected.\n"));
+
+ pThis->u32HostEventFlags |= fAddEvents;
+ vmmdevSetIRQ_Legacy(pThis);
+ }
+}
+
+
+
+/* -=-=-=-=- Interfaces shared with VMMDevHGCM.cpp -=-=-=-=- */
+
+/**
+ * Notifies the guest about new events (@a fAddEvents).
+ *
+ * This is used by VMMDev.cpp as well as VMMDevHGCM.cpp.
+ *
+ * @param pThis The VMMDev state.
+ * @param fAddEvents New events to add.
+ * @thread Any.
+ */
+void VMMDevNotifyGuest(PVMMDEV pThis, uint32_t fAddEvents)
+{
+ Log3(("VMMDevNotifyGuest: fAddEvents=%#010x\n", fAddEvents));
+
+ /*
+ * Only notify the VM when it's running.
+ */
+ VMSTATE enmVMState = PDMDevHlpVMState(pThis->pDevInsR3);
+ if ( enmVMState == VMSTATE_RUNNING
+ || enmVMState == VMSTATE_RUNNING_LS
+ || enmVMState == VMSTATE_LOADING
+ || enmVMState == VMSTATE_RESUMING
+ || enmVMState == VMSTATE_SUSPENDING
+ || enmVMState == VMSTATE_SUSPENDING_LS
+ || enmVMState == VMSTATE_SUSPENDING_EXT_LS
+ || enmVMState == VMSTATE_DEBUGGING
+ || enmVMState == VMSTATE_DEBUGGING_LS
+ )
+ {
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+ vmmdevNotifyGuestWorker(pThis, fAddEvents);
+ PDMCritSectLeave(&pThis->CritSect);
+ }
+ else
+ LogRel(("VMMDevNotifyGuest: fAddEvents=%#x ignored because enmVMState=%d\n", fAddEvents, enmVMState));
+}
+
+/**
+ * Code shared by VMMDevReq_CtlGuestFilterMask and HGCM for controlling the
+ * events the guest is interested in.
+ *
+ * @param pThis The VMMDev state.
+ * @param fOrMask Events to add (VMMDEV_EVENT_XXX). Pass 0 for no
+ * change.
+ * @param fNotMask Events to remove (VMMDEV_EVENT_XXX). Pass 0 for no
+ * change.
+ *
+ * @remarks HGCM will automatically enable VMMDEV_EVENT_HGCM when the guest
+ * starts submitting HGCM requests. Otherwise, the events are
+ * controlled by the guest.
+ */
+void VMMDevCtlSetGuestFilterMask(PVMMDEV pThis, uint32_t fOrMask, uint32_t fNotMask)
+{
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ const bool fHadEvents = (pThis->u32HostEventFlags & pThis->u32GuestFilterMask) != 0;
+
+ Log(("VMMDevCtlSetGuestFilterMask: fOrMask=%#010x, u32NotMask=%#010x, fHadEvents=%d.\n", fOrMask, fNotMask, fHadEvents));
+ if (fHadEvents)
+ {
+ if (!pThis->fNewGuestFilterMask)
+ pThis->u32NewGuestFilterMask = pThis->u32GuestFilterMask;
+
+ pThis->u32NewGuestFilterMask |= fOrMask;
+ pThis->u32NewGuestFilterMask &= ~fNotMask;
+ pThis->fNewGuestFilterMask = true;
+ }
+ else
+ {
+ pThis->u32GuestFilterMask |= fOrMask;
+ pThis->u32GuestFilterMask &= ~fNotMask;
+ vmmdevMaybeSetIRQ(pThis);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+}
+
+
+
+/* -=-=-=-=- Request processing functions. -=-=-=-=- */
+
+/**
+ * Handles VMMDevReq_ReportGuestInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pRequestHeader The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestInfo(PVMMDEV pThis, VMMDevRequestHeader *pRequestHeader)
+{
+ AssertMsgReturn(pRequestHeader->size == sizeof(VMMDevReportGuestInfo), ("%u\n", pRequestHeader->size), VERR_INVALID_PARAMETER);
+ VBoxGuestInfo const *pInfo = &((VMMDevReportGuestInfo *)pRequestHeader)->guestInfo;
+
+ if (memcmp(&pThis->guestInfo, pInfo, sizeof(*pInfo)) != 0)
+ {
+ /* Make a copy of supplied information. */
+ pThis->guestInfo = *pInfo;
+
+ /* Check additions interface version. */
+ pThis->fu32AdditionsOk = VMMDEV_INTERFACE_VERSION_IS_OK(pThis->guestInfo.interfaceVersion);
+
+ vmmdevLogGuestOsInfo(&pThis->guestInfo);
+
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestInfo)
+ pThis->pDrv->pfnUpdateGuestInfo(pThis->pDrv, &pThis->guestInfo);
+ }
+
+ if (!pThis->fu32AdditionsOk)
+ return VERR_VERSION_MISMATCH;
+
+ /* Clear our IRQ in case it was high for whatever reason. */
+ PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GuestHeartbeat.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ */
+static int vmmDevReqHandler_GuestHeartbeat(PVMMDEV pThis)
+{
+ int rc;
+ if (pThis->fHeartbeatActive)
+ {
+ uint64_t const nsNowTS = TMTimerGetNano(pThis->pFlatlinedTimer);
+ if (!pThis->fFlatlined)
+ { /* likely */ }
+ else
+ {
+ LogRel(("VMMDev: GuestHeartBeat: Guest is alive (gone %'llu ns)\n", nsNowTS - pThis->nsLastHeartbeatTS));
+ ASMAtomicWriteBool(&pThis->fFlatlined, false);
+ }
+ ASMAtomicWriteU64(&pThis->nsLastHeartbeatTS, nsNowTS);
+
+ /* Postpone (or restart if we missed a beat) the timeout timer. */
+ rc = TMTimerSetNano(pThis->pFlatlinedTimer, pThis->cNsHeartbeatTimeout);
+ }
+ else
+ rc = VINF_SUCCESS;
+ return rc;
+}
+
+
+/**
+ * Timer that fires when there have been no heartbeats for a given time.
+ *
+ * @remarks Does not take the VMMDev critsect.
+ */
+static DECLCALLBACK(void) vmmDevHeartbeatFlatlinedTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
+{
+ RT_NOREF1(pDevIns);
+ PVMMDEV pThis = (PVMMDEV)pvUser;
+ if (pThis->fHeartbeatActive)
+ {
+ uint64_t cNsElapsed = TMTimerGetNano(pTimer) - pThis->nsLastHeartbeatTS;
+ if ( !pThis->fFlatlined
+ && cNsElapsed >= pThis->cNsHeartbeatInterval)
+ {
+ LogRel(("VMMDev: vmmDevHeartbeatFlatlinedTimer: Guest seems to be unresponsive. Last heartbeat received %RU64 seconds ago\n",
+ cNsElapsed / RT_NS_1SEC));
+ ASMAtomicWriteBool(&pThis->fFlatlined, true);
+ }
+ }
+}
+
+
+/**
+ * Handles VMMDevReq_HeartbeatConfigure.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmDevReqHandler_HeartbeatConfigure(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReqHeartbeat), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VMMDevReqHeartbeat *pReq = (VMMDevReqHeartbeat *)pReqHdr;
+ int rc;
+
+ pReq->cNsInterval = pThis->cNsHeartbeatInterval;
+
+ if (pReq->fEnabled != pThis->fHeartbeatActive)
+ {
+ ASMAtomicWriteBool(&pThis->fHeartbeatActive, pReq->fEnabled);
+ if (pReq->fEnabled)
+ {
+ /*
+ * Activate the heartbeat monitor.
+ */
+ pThis->nsLastHeartbeatTS = TMTimerGetNano(pThis->pFlatlinedTimer);
+ rc = TMTimerSetNano(pThis->pFlatlinedTimer, pThis->cNsHeartbeatTimeout);
+ if (RT_SUCCESS(rc))
+ LogRel(("VMMDev: Heartbeat flatline timer set to trigger after %'RU64 ns\n", pThis->cNsHeartbeatTimeout));
+ else
+ LogRel(("VMMDev: Error starting flatline timer (heartbeat): %Rrc\n", rc));
+ }
+ else
+ {
+ /*
+ * Deactivate the heartbeat monitor.
+ */
+ rc = TMTimerStop(pThis->pFlatlinedTimer);
+ LogRel(("VMMDev: Heartbeat checking timer has been stopped (rc=%Rrc)\n", rc));
+ }
+ }
+ else
+ {
+ LogRel(("VMMDev: vmmDevReqHandler_HeartbeatConfigure: No change (fHeartbeatActive=%RTbool)\n", pThis->fHeartbeatActive));
+ rc = VINF_SUCCESS;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_NtBugCheck.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmDevReqHandler_NtBugCheck(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ if (pReqHdr->size == sizeof(VMMDevReqNtBugCheck))
+ {
+ VMMDevReqNtBugCheck const *pReq = (VMMDevReqNtBugCheck const *)pReqHdr;
+ DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevInsR3), PDMDevHlpGetVMCPU(pThis->pDevInsR3), DBGFEVENT_BSOD_VMMDEV,
+ pReq->uBugCheck, pReq->auParameters[0], pReq->auParameters[1],
+ pReq->auParameters[2], pReq->auParameters[3]);
+ }
+ else if (pReqHdr->size == sizeof(VMMDevRequestHeader))
+ {
+ LogRel(("VMMDev: NT BugCheck w/o data.\n"));
+ DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevInsR3), PDMDevHlpGetVMCPU(pThis->pDevInsR3), DBGFEVENT_BSOD_VMMDEV,
+ 0, 0, 0, 0, 0);
+ }
+ else
+ return VERR_INVALID_PARAMETER;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates a publisher tag.
+ *
+ * @returns true / false.
+ * @param pszTag Tag to validate.
+ */
+static bool vmmdevReqIsValidPublisherTag(const char *pszTag)
+{
+ /* Note! This character set is also found in Config.kmk. */
+ static char const s_szValidChars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz()[]{}+-.,";
+
+ while (*pszTag != '\0')
+ {
+ if (!strchr(s_szValidChars, *pszTag))
+ return false;
+ pszTag++;
+ }
+ return true;
+}
+
+
+/**
+ * Validates a build tag.
+ *
+ * @returns true / false.
+ * @param pszTag Tag to validate.
+ */
+static bool vmmdevReqIsValidBuildTag(const char *pszTag)
+{
+ int cchPrefix;
+ if (!strncmp(pszTag, "RC", 2))
+ cchPrefix = 2;
+ else if (!strncmp(pszTag, "BETA", 4))
+ cchPrefix = 4;
+ else if (!strncmp(pszTag, "ALPHA", 5))
+ cchPrefix = 5;
+ else
+ return false;
+
+ if (pszTag[cchPrefix] == '\0')
+ return true;
+
+ uint8_t u8;
+ int rc = RTStrToUInt8Full(&pszTag[cchPrefix], 10, &u8);
+ return rc == VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestInfo2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestInfo2(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReportGuestInfo2), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VBoxGuestInfo2 const *pInfo2 = &((VMMDevReportGuestInfo2 *)pReqHdr)->guestInfo;
+
+ LogRel(("VMMDev: Guest Additions information report: Version %d.%d.%d r%d '%.*s'\n",
+ pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild,
+ pInfo2->additionsRevision, sizeof(pInfo2->szName), pInfo2->szName));
+
+ /* The interface was introduced in 3.2 and will definitely not be
+ backported beyond 3.0 (bird). */
+ AssertMsgReturn(pInfo2->additionsMajor >= 3,
+ ("%u.%u.%u\n", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild),
+ VERR_INVALID_PARAMETER);
+
+ /* The version must fit in a full version compression. */
+ uint32_t uFullVersion = VBOX_FULL_VERSION_MAKE(pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild);
+ AssertMsgReturn( VBOX_FULL_VERSION_GET_MAJOR(uFullVersion) == pInfo2->additionsMajor
+ && VBOX_FULL_VERSION_GET_MINOR(uFullVersion) == pInfo2->additionsMinor
+ && VBOX_FULL_VERSION_GET_BUILD(uFullVersion) == pInfo2->additionsBuild,
+ ("%u.%u.%u\n", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild),
+ VERR_OUT_OF_RANGE);
+
+ /*
+ * Validate the name.
+ * Be less strict towards older additions (< v4.1.50).
+ */
+ AssertCompile(sizeof(pThis->guestInfo2.szName) == sizeof(pInfo2->szName));
+ AssertReturn(RTStrEnd(pInfo2->szName, sizeof(pInfo2->szName)) != NULL, VERR_INVALID_PARAMETER);
+ const char *pszName = pInfo2->szName;
+
+ /* The name starts with the version number (which shouldn't really be there); skip past it. */
+ char szTmp[sizeof(pInfo2->szName)];
+ size_t cchStart = RTStrPrintf(szTmp, sizeof(szTmp), "%u.%u.%u", pInfo2->additionsMajor, pInfo2->additionsMinor, pInfo2->additionsBuild);
+ AssertMsgReturn(!strncmp(pszName, szTmp, cchStart), ("%s != %s\n", pszName, szTmp), VERR_INVALID_PARAMETER);
+ pszName += cchStart;
+
+ /* Now we can either have nothing or a build tag or/and a publisher tag. */
+ if (*pszName != '\0')
+ {
+ const char *pszRelaxedName = "";
+ bool const fStrict = pInfo2->additionsMajor > 4
+ || (pInfo2->additionsMajor == 4 && pInfo2->additionsMinor > 1)
+ || (pInfo2->additionsMajor == 4 && pInfo2->additionsMinor == 1 && pInfo2->additionsBuild >= 50);
+ bool fOk = false;
+ if (*pszName == '_')
+ {
+ pszName++;
+ strcpy(szTmp, pszName);
+ char *pszTag2 = strchr(szTmp, '_');
+ if (!pszTag2)
+ {
+ fOk = vmmdevReqIsValidBuildTag(szTmp)
+ || vmmdevReqIsValidPublisherTag(szTmp);
+ }
+ else
+ {
+ *pszTag2++ = '\0';
+ fOk = vmmdevReqIsValidBuildTag(szTmp);
+ if (fOk)
+ {
+ fOk = vmmdevReqIsValidPublisherTag(pszTag2);
+ if (!fOk)
+ pszRelaxedName = szTmp;
+ }
+ }
+ }
+
+ if (!fOk)
+ {
+ AssertLogRelMsgReturn(!fStrict, ("%s", pszName), VERR_INVALID_PARAMETER);
+
+ /* non-strict mode, just zap the extra stuff. */
+ LogRel(("VMMDev: ReportGuestInfo2: Ignoring unparsable version name bits: '%s' -> '%s'.\n", pszName, pszRelaxedName));
+ pszName = pszRelaxedName;
+ }
+ }
+
+ /*
+ * Save the info and tell Main or whoever is listening.
+ */
+ pThis->guestInfo2.uFullVersion = uFullVersion;
+ pThis->guestInfo2.uRevision = pInfo2->additionsRevision;
+ pThis->guestInfo2.fFeatures = pInfo2->additionsFeatures;
+ strcpy(pThis->guestInfo2.szName, pszName);
+
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestInfo2)
+ pThis->pDrv->pfnUpdateGuestInfo2(pThis->pDrv, uFullVersion, pszName, pInfo2->additionsRevision, pInfo2->additionsFeatures);
+
+ /* Clear our IRQ in case it was high for whatever reason. */
+ PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 0);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates a new facility status entry, initializing it to inactive.
+ *
+ * @returns Pointer to a facility status entry on success, NULL on failure
+ * (table full).
+ * @param pThis The VMMDev instance data.
+ * @param enmFacility The facility type code.
+ * @param fFixed This is set when allocating the standard entries
+ * from the constructor.
+ * @param pTimeSpecNow Optionally giving the entry timestamp to use (ctor).
+ */
+static PVMMDEVFACILITYSTATUSENTRY
+vmmdevAllocFacilityStatusEntry(PVMMDEV pThis, VBoxGuestFacilityType enmFacility, bool fFixed, PCRTTIMESPEC pTimeSpecNow)
+{
+ /* If full, expunge one inactive entry. */
+ if (pThis->cFacilityStatuses == RT_ELEMENTS(pThis->aFacilityStatuses))
+ {
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ if ( pThis->aFacilityStatuses[i].enmStatus == VBoxGuestFacilityStatus_Inactive
+ && !pThis->aFacilityStatuses[i].fFixed)
+ {
+ pThis->cFacilityStatuses--;
+ int cToMove = pThis->cFacilityStatuses - i;
+ if (cToMove)
+ memmove(&pThis->aFacilityStatuses[i], &pThis->aFacilityStatuses[i + 1],
+ cToMove * sizeof(pThis->aFacilityStatuses[i]));
+ RT_ZERO(pThis->aFacilityStatuses[pThis->cFacilityStatuses]);
+ break;
+ }
+ }
+
+ if (pThis->cFacilityStatuses == RT_ELEMENTS(pThis->aFacilityStatuses))
+ return NULL;
+ }
+
+ /* Find location in array (it's sorted). */
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ if ((uint32_t)pThis->aFacilityStatuses[i].enmFacility < (uint32_t)enmFacility)
+ break;
+ i++;
+
+ /* Move. */
+ int cToMove = pThis->cFacilityStatuses - i;
+ if (cToMove > 0)
+ memmove(&pThis->aFacilityStatuses[i + 1], &pThis->aFacilityStatuses[i],
+ cToMove * sizeof(pThis->aFacilityStatuses[i]));
+ pThis->cFacilityStatuses++;
+
+ /* Initialize. */
+ pThis->aFacilityStatuses[i].enmFacility = enmFacility;
+ pThis->aFacilityStatuses[i].enmStatus = VBoxGuestFacilityStatus_Inactive;
+ pThis->aFacilityStatuses[i].fFixed = fFixed;
+ pThis->aFacilityStatuses[i].afPadding[0] = 0;
+ pThis->aFacilityStatuses[i].afPadding[1] = 0;
+ pThis->aFacilityStatuses[i].afPadding[2] = 0;
+ pThis->aFacilityStatuses[i].fFlags = 0;
+ if (pTimeSpecNow)
+ pThis->aFacilityStatuses[i].TimeSpecTS = *pTimeSpecNow;
+ else
+ RTTimeSpecSetNano(&pThis->aFacilityStatuses[i].TimeSpecTS, 0);
+
+ return &pThis->aFacilityStatuses[i];
+}
+
+
+/**
+ * Gets a facility status entry, allocating a new one if not already present.
+ *
+ * @returns Pointer to a facility status entry on success, NULL on failure
+ * (table full).
+ * @param pThis The VMMDev instance data.
+ * @param enmFacility The facility type code.
+ */
+static PVMMDEVFACILITYSTATUSENTRY vmmdevGetFacilityStatusEntry(PVMMDEV pThis, VBoxGuestFacilityType enmFacility)
+{
+ /** @todo change to binary search. */
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ if (pThis->aFacilityStatuses[i].enmFacility == enmFacility)
+ return &pThis->aFacilityStatuses[i];
+ if ((uint32_t)pThis->aFacilityStatuses[i].enmFacility < (uint32_t)enmFacility)
+ break;
+ }
+ return vmmdevAllocFacilityStatusEntry(pThis, enmFacility, false /*fFixed*/, NULL);
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Validate input.
+ */
+ AssertMsgReturn(pReqHdr->size == sizeof(VMMDevReportGuestStatus), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+ VBoxGuestStatus *pStatus = &((VMMDevReportGuestStatus *)pReqHdr)->guestStatus;
+ AssertMsgReturn( pStatus->facility > VBoxGuestFacilityType_Unknown
+ && pStatus->facility <= VBoxGuestFacilityType_All,
+ ("%d\n", pStatus->facility),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pStatus->status == (VBoxGuestFacilityStatus)(uint16_t)pStatus->status,
+ ("%#x (%u)\n", pStatus->status, pStatus->status),
+ VERR_OUT_OF_RANGE);
+
+ /*
+ * Do the update.
+ */
+ RTTIMESPEC Now;
+ RTTimeNow(&Now);
+ if (pStatus->facility == VBoxGuestFacilityType_All)
+ {
+ uint32_t i = pThis->cFacilityStatuses;
+ while (i-- > 0)
+ {
+ pThis->aFacilityStatuses[i].TimeSpecTS = Now;
+ pThis->aFacilityStatuses[i].enmStatus = pStatus->status;
+ pThis->aFacilityStatuses[i].fFlags = pStatus->flags;
+ }
+ }
+ else
+ {
+ PVMMDEVFACILITYSTATUSENTRY pEntry = vmmdevGetFacilityStatusEntry(pThis, pStatus->facility);
+ if (!pEntry)
+ {
+ LogRelMax(10, ("VMMDev: Facility table is full - facility=%u status=%u\n", pStatus->facility, pStatus->status));
+ return VERR_OUT_OF_RESOURCES;
+ }
+
+ pEntry->TimeSpecTS = Now;
+ pEntry->enmStatus = pStatus->status;
+ pEntry->fFlags = pStatus->flags;
+ }
+
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestStatus)
+ pThis->pDrv->pfnUpdateGuestStatus(pThis->pDrv, pStatus->facility, pStatus->status, pStatus->flags, &Now);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestUserState.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestUserState(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Validate input.
+ */
+ VMMDevReportGuestUserState *pReq = (VMMDevReportGuestUserState *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReqHdr->size), VERR_INVALID_PARAMETER);
+
+ if ( pThis->pDrv
+ && pThis->pDrv->pfnUpdateGuestUserState)
+ {
+ /* Play safe. */
+ AssertReturn(pReq->header.size <= _2K, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbUser <= 256, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbDomain <= 256, VERR_TOO_MUCH_DATA);
+ AssertReturn(pReq->status.cbDetails <= _1K, VERR_TOO_MUCH_DATA);
+
+ /* pbDynamic marks the beginning of the struct's dynamically
+ * allocated data area. */
+ uint8_t *pbDynamic = (uint8_t *)&pReq->status.szUser;
+ uint32_t cbLeft = pReqHdr->size - RT_UOFFSETOF(VMMDevReportGuestUserState, status.szUser);
+
+ /* The user. */
+ AssertReturn(pReq->status.cbUser > 0, VERR_INVALID_PARAMETER); /* User name is required. */
+ AssertReturn(pReq->status.cbUser <= cbLeft, VERR_INVALID_PARAMETER);
+ const char *pszUser = (const char *)pbDynamic;
+ AssertReturn(RTStrEnd(pszUser, pReq->status.cbUser), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pszUser);
+ AssertRCReturn(rc, rc);
+
+ /* Advance to the next field. */
+ pbDynamic += pReq->status.cbUser;
+ cbLeft -= pReq->status.cbUser;
+
+ /* pszDomain can be NULL. */
+ AssertReturn(pReq->status.cbDomain <= cbLeft, VERR_INVALID_PARAMETER);
+ const char *pszDomain = NULL;
+ if (pReq->status.cbDomain)
+ {
+ pszDomain = (const char *)pbDynamic;
+ AssertReturn(RTStrEnd(pszDomain, pReq->status.cbDomain), VERR_INVALID_PARAMETER);
+ rc = RTStrValidateEncoding(pszDomain);
+ AssertRCReturn(rc, rc);
+
+ /* Advance to the next field. */
+ pbDynamic += pReq->status.cbDomain;
+ cbLeft -= pReq->status.cbDomain;
+ }
+
+ /* pbDetails can be NULL. */
+ const uint8_t *pbDetails = NULL;
+ AssertReturn(pReq->status.cbDetails <= cbLeft, VERR_INVALID_PARAMETER);
+ if (pReq->status.cbDetails > 0)
+ pbDetails = pbDynamic;
+
+ pThis->pDrv->pfnUpdateGuestUserState(pThis->pDrv, pszUser, pszDomain, (uint32_t)pReq->status.state,
+ pbDetails, pReq->status.cbDetails);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestCapabilities.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestCapabilities(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqGuestCapabilities *pReq = (VMMDevReqGuestCapabilities *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* Enable VMMDEV_GUEST_SUPPORTS_GRAPHICS automatically for guests using the old
+ * request to report their capabilities.
+ */
+ const uint32_t fu32Caps = pReq->caps | VMMDEV_GUEST_SUPPORTS_GRAPHICS;
+
+ if (pThis->guestCaps != fu32Caps)
+ {
+ /* make a copy of supplied information */
+ pThis->guestCaps = fu32Caps;
+
+ LogRel(("VMMDev: Guest Additions capability report (legacy): (0x%x) seamless: %s, hostWindowMapping: %s, graphics: yes\n",
+ fu32Caps,
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING ? "yes" : "no"));
+
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestCapabilities)
+ pThis->pDrv->pfnUpdateGuestCapabilities(pThis->pDrv, fu32Caps);
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetGuestCapabilities.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetGuestCapabilities(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqGuestCapabilities2 *pReq = (VMMDevReqGuestCapabilities2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ uint32_t fu32Caps = pThis->guestCaps;
+ fu32Caps |= pReq->u32OrMask;
+ fu32Caps &= ~pReq->u32NotMask;
+
+ LogRel(("VMMDev: Guest Additions capability report: (%#x -> %#x) seamless: %s, hostWindowMapping: %s, graphics: %s\n",
+ pThis->guestCaps, fu32Caps,
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING ? "yes" : "no",
+ fu32Caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS ? "yes" : "no"));
+
+ pThis->guestCaps = fu32Caps;
+
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestCapabilities)
+ pThis->pDrv->pfnUpdateGuestCapabilities(pThis->pDrv, fu32Caps);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetMouseStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetMouseStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMouseStatus *pReq = (VMMDevReqMouseStatus *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->mouseFeatures = pThis->mouseCapabilities
+ & VMMDEV_MOUSE_MASK;
+ pReq->pointerXPos = pThis->mouseXAbs;
+ pReq->pointerYPos = pThis->mouseYAbs;
+ LogRel2(("VMMDev: vmmdevReqHandler_GetMouseStatus: mouseFeatures=%#x, xAbs=%d, yAbs=%d\n",
+ pReq->mouseFeatures, pReq->pointerXPos, pReq->pointerYPos));
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetMouseStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetMouseStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMouseStatus *pReq = (VMMDevReqMouseStatus *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: mouseFeatures=%#x\n", pReq->mouseFeatures));
+
+ bool fNotify = false;
+ if ( (pReq->mouseFeatures & VMMDEV_MOUSE_NOTIFY_HOST_MASK)
+ != ( pThis->mouseCapabilities
+ & VMMDEV_MOUSE_NOTIFY_HOST_MASK))
+ fNotify = true;
+
+ pThis->mouseCapabilities &= ~VMMDEV_MOUSE_GUEST_MASK;
+ pThis->mouseCapabilities |= (pReq->mouseFeatures & VMMDEV_MOUSE_GUEST_MASK);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: New host capabilities: %#x\n", pThis->mouseCapabilities));
+
+ /*
+ * Notify connector if something changed.
+ */
+ if (fNotify)
+ {
+ LogRelFlow(("VMMDev: vmmdevReqHandler_SetMouseStatus: Notifying connector\n"));
+ pThis->pDrv->pfnUpdateMouseCapabilities(pThis->pDrv, pThis->mouseCapabilities);
+ }
+
+ return VINF_SUCCESS;
+}
+
+static int vmmdevVerifyPointerShape(VMMDevReqMousePointer *pReq)
+{
+ /* Should be enough for most mouse pointers. */
+ if (pReq->width > 8192 || pReq->height > 8192)
+ return VERR_INVALID_PARAMETER;
+
+ uint32_t cbShape = (pReq->width + 7) / 8 * pReq->height; /* size of the AND mask */
+ cbShape = ((cbShape + 3) & ~3) + pReq->width * 4 * pReq->height; /* + gap + size of the XOR mask */
+ if (RT_UOFFSETOF(VMMDevReqMousePointer, pointerData) + cbShape > pReq->header.size)
+ return VERR_INVALID_PARAMETER;
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Handles VMMDevReq_SetPointerShape.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetPointerShape(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqMousePointer *pReq = (VMMDevReqMousePointer *)pReqHdr;
+ if (pReq->header.size < sizeof(*pReq))
+ {
+ AssertMsg(pReq->header.size == 0x10028 && pReq->header.version == 10000, /* don't complain about legacy!!! */
+ ("VMMDev mouse shape structure has invalid size %d (%#x) version=%d!\n",
+ pReq->header.size, pReq->header.size, pReq->header.version));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ bool fVisible = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_VISIBLE);
+ bool fAlpha = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_ALPHA);
+ bool fShape = RT_BOOL(pReq->fFlags & VBOX_MOUSE_POINTER_SHAPE);
+
+ Log(("VMMDevReq_SetPointerShape: visible: %d, alpha: %d, shape = %d, width: %d, height: %d\n",
+ fVisible, fAlpha, fShape, pReq->width, pReq->height));
+
+ if (pReq->header.size == sizeof(VMMDevReqMousePointer))
+ {
+ /* The guest did not actually provide the shape. */
+ fShape = false;
+ }
+
+ /* forward call to driver */
+ if (fShape)
+ {
+ int rc = vmmdevVerifyPointerShape(pReq);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ pThis->pDrv->pfnUpdatePointerShape(pThis->pDrv,
+ fVisible,
+ fAlpha,
+ pReq->xHot, pReq->yHot,
+ pReq->width, pReq->height,
+ pReq->pointerData);
+ }
+ else
+ {
+ pThis->pDrv->pfnUpdatePointerShape(pThis->pDrv,
+ fVisible,
+ 0,
+ 0, 0,
+ 0, 0,
+ NULL);
+ }
+
+ pThis->fHostCursorRequested = fVisible;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHostTime.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHostTime(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHostTime *pReq = (VMMDevReqHostTime *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (RT_LIKELY(!pThis->fGetHostTimeDisabled))
+ {
+ RTTIMESPEC now;
+ pReq->time = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pThis->pDevInsR3, &now));
+ return VINF_SUCCESS;
+ }
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHypervisorInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHypervisorInfo(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHypervisorInfo *pReq = (VMMDevReqHypervisorInfo *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return PGMR3MappingsSize(PDMDevHlpGetVM(pThis->pDevInsR3), &pReq->hypervisorSize);
+}
+
+
+/**
+ * Handles VMMDevReq_SetHypervisorInfo.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetHypervisorInfo(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHypervisorInfo *pReq = (VMMDevReqHypervisorInfo *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ int rc;
+ PVM pVM = PDMDevHlpGetVM(pThis->pDevInsR3);
+ if (pReq->hypervisorStart == 0)
+ rc = PGMR3MappingsUnfix(pVM);
+ else
+ {
+ /* only if the client has queried the size before! */
+ uint32_t cbMappings;
+ rc = PGMR3MappingsSize(pVM, &cbMappings);
+ if (RT_SUCCESS(rc) && pReq->hypervisorSize == cbMappings)
+ {
+ /* new reservation */
+ rc = PGMR3MappingsFix(pVM, pReq->hypervisorStart, pReq->hypervisorSize);
+ LogRel(("VMMDev: Guest reported fixed hypervisor window at 0%010x LB %#x (rc=%Rrc)\n",
+ pReq->hypervisorStart, pReq->hypervisorSize, rc));
+ }
+ else if (RT_FAILURE(rc))
+ rc = VERR_TRY_AGAIN;
+ }
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_RegisterPatchMemory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_RegisterPatchMemory(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqPatchMemory *pReq = (VMMDevReqPatchMemory *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return VMMR3RegisterPatchMemory(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->pPatchMem, pReq->cbPatchMem);
+}
+
+
+/**
+ * Handles VMMDevReq_DeregisterPatchMemory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_DeregisterPatchMemory(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqPatchMemory *pReq = (VMMDevReqPatchMemory *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ return VMMR3DeregisterPatchMemory(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->pPatchMem, pReq->cbPatchMem);
+}
+
+
+/**
+ * Handles VMMDevReq_SetPowerStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetPowerStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPowerStateRequest *pReq = (VMMDevPowerStateRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ switch (pReq->powerState)
+ {
+ case VMMDevPowerState_Pause:
+ {
+ LogRel(("VMMDev: Guest requests the VM to be suspended (paused)\n"));
+ return PDMDevHlpVMSuspend(pThis->pDevInsR3);
+ }
+
+ case VMMDevPowerState_PowerOff:
+ {
+ LogRel(("VMMDev: Guest requests the VM to be turned off\n"));
+ return PDMDevHlpVMPowerOff(pThis->pDevInsR3);
+ }
+
+ case VMMDevPowerState_SaveState:
+ {
+ if (true /*pThis->fAllowGuestToSaveState*/)
+ {
+ LogRel(("VMMDev: Guest requests the VM to be saved and powered off\n"));
+ return PDMDevHlpVMSuspendSaveAndPowerOff(pThis->pDevInsR3);
+ }
+ LogRel(("VMMDev: Guest requests the VM to be saved and powered off, declined\n"));
+ return VERR_ACCESS_DENIED;
+ }
+
+ default:
+ AssertMsgFailed(("VMMDev: Invalid power state request: %d\n", pReq->powerState));
+ return VERR_INVALID_PARAMETER;
+ }
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequest
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @remarks Deprecated.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequest *pReq = (VMMDevDisplayChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+/**
+ * @todo It looks like a multi-monitor guest which only uses
+ * @c VMMDevReq_GetDisplayChangeRequest (not the *2 version) will get
+ * into a @c VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST event loop if it tries
+ * to acknowledge host requests for additional monitors. Should the loop
+ * which checks for those requests be removed?
+ */
+
+ DISPLAYCHANGEREQUEST *pDispRequest = &pThis->displayChangeData.aRequests[0];
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+ /* Check if there are more pending requests. */
+ for (unsigned i = 1; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ break;
+ }
+ }
+
+ /* Remember which resolution the client has queried, subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+
+ Log(("VMMDev: returning display change request xres = %d, yres = %d, bpp = %d\n", pReq->xres, pReq->yres, pReq->bpp));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequest2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequest2(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequest2 *pReq = (VMMDevDisplayChangeRequest2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pDispRequest = NULL;
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Select a pending request to report. */
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ pDispRequest = &pThis->displayChangeData.aRequests[i];
+ /* Remember which request should be reported. */
+ pThis->displayChangeData.iCurrentMonitor = i;
+ Log3(("VMMDev: will report pending request for %u\n", i));
+ break;
+ }
+ }
+
+ /* Check if there are more pending requests. */
+ i++;
+ for (; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ Log3(("VMMDev: another pending at %u\n", i));
+ break;
+ }
+ }
+
+ if (pDispRequest)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+ /* Remember which resolution the client has queried, subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ Log3(("VMMDev: no pending request!!!\n"));
+ }
+ }
+
+ if (!pDispRequest)
+ {
+ Log3(("VMMDev: default to %d\n", pThis->displayChangeData.iCurrentMonitor));
+ pDispRequest = &pThis->displayChangeData.aRequests[pThis->displayChangeData.iCurrentMonitor];
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+ pReq->display = pDisplayDef->idDisplay;
+
+ Log(("VMMDev: returning display change request xres = %d, yres = %d, bpp = %d at %d\n",
+ pReq->xres, pReq->yres, pReq->bpp, pReq->display));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequestEx.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequestEx(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequestEx *pReq = (VMMDevDisplayChangeRequestEx *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pDispRequest = NULL;
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ /* Select a pending request to report. */
+ unsigned i;
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ pDispRequest = &pThis->displayChangeData.aRequests[i];
+ /* Remember which request should be reported. */
+ pThis->displayChangeData.iCurrentMonitor = i;
+ Log3(("VMMDev: will report pending request for %d\n",
+ i));
+ break;
+ }
+ }
+
+ /* Check if there are more pending requests. */
+ i++;
+ for (; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ if (pThis->displayChangeData.aRequests[i].fPending)
+ {
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ Log3(("VMMDev: another pending at %d\n",
+ i));
+ break;
+ }
+ }
+
+ if (pDispRequest)
+ {
+ /* Current request has been read at least once. */
+ pDispRequest->fPending = false;
+
+ /* Remember which resolution the client has queried, subsequent reads
+ * will return the same values. */
+ pDispRequest->lastReadDisplayChangeRequest = pDispRequest->displayChangeRequest;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ Log3(("VMMDev: no pending request!!!\n"));
+ }
+ }
+
+ if (!pDispRequest)
+ {
+ Log3(("VMMDev: default to %d\n",
+ pThis->displayChangeData.iCurrentMonitor));
+ pDispRequest = &pThis->displayChangeData.aRequests[pThis->displayChangeData.iCurrentMonitor];
+ }
+
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDispRequest->lastReadDisplayChangeRequest :
+ &pDispRequest->displayChangeRequest;
+ pReq->xres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CX) ? pDisplayDef->cx : 0;
+ pReq->yres = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_CY) ? pDisplayDef->cy : 0;
+ pReq->bpp = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_BPP) ? pDisplayDef->cBitsPerPixel : 0;
+ pReq->display = pDisplayDef->idDisplay;
+ pReq->cxOrigin = pDisplayDef->xOrigin;
+ pReq->cyOrigin = pDisplayDef->yOrigin;
+ pReq->fEnabled = !RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_DISABLED);
+ pReq->fChangeOrigin = RT_BOOL(pDisplayDef->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN);
+
+ Log(("VMMDevEx: returning display change request xres = %d, yres = %d, bpp = %d id %d xPos = %d, yPos = %d & Enabled=%d\n",
+ pReq->xres, pReq->yres, pReq->bpp, pReq->display, pReq->cxOrigin, pReq->cyOrigin, pReq->fEnabled));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetDisplayChangeRequestMulti.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetDisplayChangeRequestMulti(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevDisplayChangeRequestMulti *pReq = (VMMDevDisplayChangeRequestMulti *)pReqHdr;
+ unsigned i;
+
+ ASSERT_GUEST_MSG_RETURN(pReq->header.size >= sizeof(*pReq),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ uint32_t const cDisplays = pReq->cDisplays;
+ ASSERT_GUEST_MSG_RETURN(cDisplays > 0 && cDisplays <= RT_ELEMENTS(pThis->displayChangeData.aRequests),
+ ("cDisplays %u\n", cDisplays), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ ASSERT_GUEST_MSG_RETURN(pReq->header.size >= sizeof(*pReq) + (cDisplays - 1) * sizeof(VMMDevDisplayDef),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ if (pReq->eventAck == VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST)
+ {
+ uint32_t cDisplaysOut = 0;
+ /* Remember which resolution the client has queried, subsequent reads
+ * will return the same values. */
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); ++i)
+ {
+ DISPLAYCHANGEREQUEST *pDCR = &pThis->displayChangeData.aRequests[i];
+
+ pDCR->lastReadDisplayChangeRequest = pDCR->displayChangeRequest;
+
+ if (pDCR->fPending)
+ {
+ if (cDisplaysOut < cDisplays)
+ pReq->aDisplays[cDisplaysOut] = pDCR->lastReadDisplayChangeRequest;
+
+ cDisplaysOut++;
+ pDCR->fPending = false;
+ }
+ }
+
+ pReq->cDisplays = cDisplaysOut;
+ pThis->displayChangeData.fGuestSentChangeEventAck = true;
+ }
+ else
+ {
+ /* Fill the guest request with monitor layout data. */
+ for (i = 0; i < cDisplays; ++i)
+ {
+ /* If not a response to a VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, just
+ * read the last valid video mode hint. This happens when the guest X server
+ * determines the initial mode. */
+ DISPLAYCHANGEREQUEST const *pDCR = &pThis->displayChangeData.aRequests[i];
+ VMMDevDisplayDef const *pDisplayDef = pThis->displayChangeData.fGuestSentChangeEventAck ?
+ &pDCR->lastReadDisplayChangeRequest :
+ &pDCR->displayChangeRequest;
+ pReq->aDisplays[i] = *pDisplayDef;
+ }
+ }
+
+ Log(("VMMDev: returning multimonitor display change request cDisplays %d\n", cDisplays));
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoModeSupported.
+ *
+ * Query whether the given video mode is supported.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoModeSupported(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoModeSupportedRequest *pReq = (VMMDevVideoModeSupportedRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThis->pDrv->pfnVideoModeSupported(pThis->pDrv,
+ 0, /* primary screen. */
+ pReq->width,
+ pReq->height,
+ pReq->bpp,
+ &pReq->fSupported);
+}
+
+
+/**
+ * Handles VMMDevReq_VideoModeSupported2.
+ *
+ * Query whether the given video mode is supported for a specific display.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoModeSupported2(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoModeSupportedRequest2 *pReq = (VMMDevVideoModeSupportedRequest2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThis->pDrv->pfnVideoModeSupported(pThis->pDrv,
+ pReq->display,
+ pReq->width,
+ pReq->height,
+ pReq->bpp,
+ &pReq->fSupported);
+}
+
+
+
+/**
+ * Handles VMMDevReq_GetHeightReduction.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetHeightReduction(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetHeightReductionRequest *pReq = (VMMDevGetHeightReductionRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* forward the call */
+ return pThis->pDrv->pfnGetHeightReduction(pThis->pDrv, &pReq->heightReduction);
+}
+
+
+/**
+ * Handles VMMDevReq_AcknowledgeEvents.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_AcknowledgeEvents(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevEvents *pReq = (VMMDevEvents *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ STAM_REL_COUNTER_INC(&pThis->StatSlowIrqAck);
+
+ if (!VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ /*
+ * Note! This code is duplicated in vmmdevFastRequestIrqAck.
+ */
+ if (pThis->fNewGuestFilterMask)
+ {
+ pThis->fNewGuestFilterMask = false;
+ pThis->u32GuestFilterMask = pThis->u32NewGuestFilterMask;
+ }
+
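+        /* Hand the pending (filtered) events to the guest, clear them on the host
+           side and deassert the interrupt line. */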
+ pReq->events = pThis->u32HostEventFlags & pThis->u32GuestFilterMask;
+
+ pThis->u32HostEventFlags &= ~pThis->u32GuestFilterMask;
+ pThis->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+ PDMDevHlpPCISetIrqNoWait(pThis->CTX_SUFF(pDevIns), 0, 0);
+ }
+ else
+ vmmdevSetIRQ_Legacy(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_CtlGuestFilterMask.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_CtlGuestFilterMask(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCtlGuestFilterMask *pReq = (VMMDevCtlGuestFilterMask *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ LogRelFlow(("VMMDev: vmmdevReqHandler_CtlGuestFilterMask: OR mask: %#x, NOT mask: %#x\n", pReq->u32OrMask, pReq->u32NotMask));
+
+ /* HGCM event notification is enabled by the VMMDev device
+ * automatically when any HGCM command is issued. The guest
+ * cannot disable these notifications. */
+ VMMDevCtlSetGuestFilterMask(pThis, pReq->u32OrMask, pReq->u32NotMask & ~VMMDEV_EVENT_HGCM);
+ return VINF_SUCCESS;
+}
+
+#ifdef VBOX_WITH_HGCM
+
+/**
+ * Handles VMMDevReq_HGCMConnect.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMConnect(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this is >= ... */
+
+ if (pThis->pHGCMDrv)
+ {
+ Log(("VMMDevReq_HGCMConnect\n"));
+ return vmmdevHGCMConnect(pThis, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_HGCMConnect: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMDisconnect.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMDisconnect(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThis->pHGCMDrv)
+ {
+ Log(("VMMDevReq_VMMDevHGCMDisconnect\n"));
+ return vmmdevHGCMDisconnect(pThis, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_VMMDevHGCMDisconnect: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMCall.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+static int vmmdevReqHandler_HGCMCall(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr,
+ uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
+{
+ VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER);
+
+ if (pThis->pHGCMDrv)
+ {
+        Log2(("VMMDevReq_HGCMCall: sizeof(VMMDevHGCMCall) = %04X\n", sizeof(VMMDevHGCMCall)));
+ Log2(("%.*Rhxd\n", pReq->header.header.size, pReq));
+
+ return vmmdevHGCMCall(pThis, pReq, pReq->header.header.size, GCPhysReqHdr, pReq->header.header.requestType,
+ tsArrival, ppLock);
+ }
+
+ Log(("VMMDevReq_HGCMCall: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+/**
+ * Handles VMMDevReq_HGCMCancel.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ * @param GCPhysReqHdr The guest physical address of the request header.
+ */
+static int vmmdevReqHandler_HGCMCancel(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr)
+{
+ VMMDevHGCMCancel *pReq = (VMMDevHGCMCancel *)pReqHdr;
+ AssertMsgReturn(pReq->header.header.size >= sizeof(*pReq), ("%u\n", pReq->header.header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThis->pHGCMDrv)
+ {
+ Log(("VMMDevReq_VMMDevHGCMCancel\n"));
+ return vmmdevHGCMCancel(pThis, pReq, GCPhysReqHdr);
+ }
+
+ Log(("VMMDevReq_VMMDevHGCMCancel: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Handles VMMDevReq_HGCMCancel2.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_HGCMCancel2(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevHGCMCancel2 *pReq = (VMMDevHGCMCancel2 *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (pThis->pHGCMDrv)
+ {
+ Log(("VMMDevReq_HGCMCancel2\n"));
+ return vmmdevHGCMCancel2(pThis, pReq->physReqToCancel);
+ }
+
+ Log(("VMMDevReq_HGCMCancel2: HGCM Connector is NULL!\n"));
+ return VERR_NOT_SUPPORTED;
+}
+
+#endif /* VBOX_WITH_HGCM */
+
+
+/**
+ * Handles VMMDevReq_VideoAccelEnable.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoAccelEnable(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoAccelEnable *pReq = (VMMDevVideoAccelEnable *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (!pThis->pDrv)
+ {
+ Log(("VMMDevReq_VideoAccelEnable Connector is NULL!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
+ if (pReq->cbRingBuffer != VMMDEV_VBVA_RING_BUFFER_SIZE)
+ {
+ /* The guest driver seems compiled with different headers. */
+ LogRelMax(16,("VMMDevReq_VideoAccelEnable guest ring buffer size %#x, should be %#x!!\n", pReq->cbRingBuffer, VMMDEV_VBVA_RING_BUFFER_SIZE));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /* The request is correct. */
+ pReq->fu32Status |= VBVA_F_STATUS_ACCEPTED;
+
+ LogFlow(("VMMDevReq_VideoAccelEnable pReq->u32Enable = %d\n", pReq->u32Enable));
+
+ int rc = pReq->u32Enable
+ ? pThis->pDrv->pfnVideoAccelEnable(pThis->pDrv, true, &pThis->pVMMDevRAMR3->vbvaMemory)
+ : pThis->pDrv->pfnVideoAccelEnable(pThis->pDrv, false, NULL);
+
+ if ( pReq->u32Enable
+ && RT_SUCCESS(rc))
+ {
+ pReq->fu32Status |= VBVA_F_STATUS_ENABLED;
+
+ /* Remember that guest successfully enabled acceleration.
+ * We need to reestablish it on restoring the VM from saved state.
+ */
+ pThis->u32VideoAccelEnabled = 1;
+ }
+ else
+ {
+ /* The acceleration was not enabled. Remember that. */
+ pThis->u32VideoAccelEnabled = 0;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoAccelFlush.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoAccelFlush(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoAccelFlush *pReq = (VMMDevVideoAccelFlush *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER); /** @todo Not sure why this >= ... */
+
+ if (!pThis->pDrv)
+ {
+ Log(("VMMDevReq_VideoAccelFlush: Connector is NULL!!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
+ pThis->pDrv->pfnVideoAccelFlush(pThis->pDrv);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_VideoSetVisibleRegion.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_VideoSetVisibleRegion(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVideoSetVisibleRegion *pReq = (VMMDevVideoSetVisibleRegion *)pReqHdr;
+ AssertMsgReturn(pReq->header.size + sizeof(RTRECT) >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (!pThis->pDrv)
+ {
+ Log(("VMMDevReq_VideoSetVisibleRegion: Connector is NULL!!!\n"));
+ return VERR_NOT_SUPPORTED;
+ }
+
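+    /* The request structure embeds one RTRECT, hence the expected size below is the
+       fixed part plus cRect rectangles minus that embedded rectangle. */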
+ if ( pReq->cRect > _1M /* restrict to sane range */
+ || pReq->header.size != sizeof(VMMDevVideoSetVisibleRegion) + pReq->cRect * sizeof(RTRECT) - sizeof(RTRECT))
+ {
+ Log(("VMMDevReq_VideoSetVisibleRegion: cRects=%#x doesn't match size=%#x or is out of bounds\n",
+ pReq->cRect, pReq->header.size));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ Log(("VMMDevReq_VideoSetVisibleRegion %d rectangles\n", pReq->cRect));
+ /* forward the call */
+ return pThis->pDrv->pfnSetVisibleRegion(pThis->pDrv, pReq->cRect, &pReq->Rect);
+}
+
+
+/**
+ * Handles VMMDevReq_GetSeamlessChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetSeamlessChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevSeamlessChangeRequest *pReq = (VMMDevSeamlessChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+ Log(("VMMDev: returning seamless change request mode=%d\n", pThis->fSeamlessEnabled));
+ if (pThis->fSeamlessEnabled)
+ pReq->mode = VMMDev_Seamless_Visible_Region;
+ else
+ pReq->mode = VMMDev_Seamless_Disabled;
+
+ if (pReq->eventAck == VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
+ {
+ /* Remember which mode the client has queried. */
+ pThis->fLastSeamlessEnabled = pThis->fSeamlessEnabled;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetVRDPChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetVRDPChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevVRDPChangeRequest *pReq = (VMMDevVRDPChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+ Log(("VMMDev: returning VRDP status %d level %d\n", pThis->fVRDPEnabled, pThis->uVRDPExperienceLevel));
+
+ pReq->u8VRDPActive = pThis->fVRDPEnabled;
+ pReq->u32VRDPExperienceLevel = pThis->uVRDPExperienceLevel;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetMemBalloonChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetMemBalloonChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetMemBalloonChangeRequest *pReq = (VMMDevGetMemBalloonChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* just pass on the information */
+    Log(("VMMDev: returning memory balloon size = %d MB\n", pThis->cMbMemoryBalloon));
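+    /* Both values are expressed in 1 MB units: the requested balloon size and the
+       total amount of guest RAM. */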
+ pReq->cBalloonChunks = pThis->cMbMemoryBalloon;
+ pReq->cPhysMemChunks = pThis->cbGuestRAM / (uint64_t)_1M;
+
+ if (pReq->eventAck == VMMDEV_EVENT_BALLOON_CHANGE_REQUEST)
+ {
+ /* Remember which mode the client has queried. */
+ pThis->cMbMemoryBalloonLast = pThis->cMbMemoryBalloon;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ChangeMemBalloon.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ChangeMemBalloon(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevChangeMemBalloon *pReq = (VMMDevChangeMemBalloon *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->cPages == VMMDEV_MEMORY_BALLOON_CHUNK_PAGES, ("%u\n", pReq->cPages), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->header.size == (uint32_t)RT_UOFFSETOF_DYN(VMMDevChangeMemBalloon, aPhysPage[pReq->cPages]),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_ChangeMemBalloon\n"));
+ int rc = PGMR3PhysChangeMemBalloon(PDMDevHlpGetVM(pThis->pDevInsR3), !!pReq->fInflate, pReq->cPages, pReq->aPhysPage);
+ if (pReq->fInflate)
+ STAM_REL_U32_INC(&pThis->StatMemBalloonChunks);
+ else
+ STAM_REL_U32_DEC(&pThis->StatMemBalloonChunks);
+ return rc;
+}
+
+
+/**
+ * Handles VMMDevReq_GetStatisticsChangeRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetStatisticsChangeRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetStatisticsChangeRequest *pReq = (VMMDevGetStatisticsChangeRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_GetStatisticsChangeRequest\n"));
+ /* just pass on the information */
+ Log(("VMMDev: returning statistics interval %d seconds\n", pThis->u32StatIntervalSize));
+ pReq->u32StatInterval = pThis->u32StatIntervalSize;
+
+ if (pReq->eventAck == VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST)
+ {
+ /* Remember which mode the client has queried. */
+        pThis->u32LastStatIntervalSize = pThis->u32StatIntervalSize;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportGuestStats.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportGuestStats(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReportGuestStats *pReq = (VMMDevReportGuestStats *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ Log(("VMMDevReq_ReportGuestStats\n"));
+#ifdef LOG_ENABLED
+ VBoxGuestStatistics *pGuestStats = &pReq->guestStats;
+
+ Log(("Current statistics:\n"));
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_IDLE)
+ Log(("CPU%u: CPU Load Idle %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_Idle));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_KERNEL)
+ Log(("CPU%u: CPU Load Kernel %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_Kernel));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_CPU_LOAD_USER)
+ Log(("CPU%u: CPU Load User %-3d%%\n", pGuestStats->u32CpuId, pGuestStats->u32CpuLoad_User));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_THREADS)
+ Log(("CPU%u: Thread %d\n", pGuestStats->u32CpuId, pGuestStats->u32Threads));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PROCESSES)
+ Log(("CPU%u: Processes %d\n", pGuestStats->u32CpuId, pGuestStats->u32Processes));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_HANDLES)
+ Log(("CPU%u: Handles %d\n", pGuestStats->u32CpuId, pGuestStats->u32Handles));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEMORY_LOAD)
+ Log(("CPU%u: Memory Load %d%%\n", pGuestStats->u32CpuId, pGuestStats->u32MemoryLoad));
+
+ /* Note that reported values are in pages; upper layers expect them in megabytes */
+ Log(("CPU%u: Page size %-4d bytes\n", pGuestStats->u32CpuId, pGuestStats->u32PageSize));
+ Assert(pGuestStats->u32PageSize == 4096);
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_TOTAL)
+ Log(("CPU%u: Total physical memory %-4d MB\n", pGuestStats->u32CpuId, (pGuestStats->u32PhysMemTotal + (_1M/_4K)-1) / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_AVAIL)
+ Log(("CPU%u: Free physical memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PhysMemAvail / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PHYS_MEM_BALLOON)
+ Log(("CPU%u: Memory balloon size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PhysMemBalloon / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_COMMIT_TOTAL)
+ Log(("CPU%u: Committed memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemCommitTotal / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_TOTAL)
+ Log(("CPU%u: Total kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelTotal / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_PAGED)
+ Log(("CPU%u: Paged kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelPaged / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED)
+ Log(("CPU%u: Nonpaged kernel memory %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemKernelNonPaged / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_MEM_SYSTEM_CACHE)
+ Log(("CPU%u: System cache size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32MemSystemCache / (_1M/_4K)));
+
+ if (pGuestStats->u32StatCaps & VBOX_GUEST_STAT_PAGE_FILE_SIZE)
+ Log(("CPU%u: Page file size %-4d MB\n", pGuestStats->u32CpuId, pGuestStats->u32PageFileSize / (_1M/_4K)));
+ Log(("Statistics end *******************\n"));
+#endif /* LOG_ENABLED */
+
+ /* forward the call */
+ return pThis->pDrv->pfnReportStatistics(pThis->pDrv, &pReq->guestStats);
+}
+
+
+/**
+ * Handles VMMDevReq_QueryCredentials.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_QueryCredentials(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCredentials *pReq = (VMMDevCredentials *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* let's start by nulling out the data */
+ memset(pReq->szUserName, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pReq->szPassword, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pReq->szDomain, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+
+ /* should we return whether we got credentials for a logon? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_QUERYPRESENCE)
+ {
+ if ( pThis->pCredentials->Logon.szUserName[0]
+ || pThis->pCredentials->Logon.szPassword[0]
+ || pThis->pCredentials->Logon.szDomain[0])
+ pReq->u32Flags |= VMMDEV_CREDENTIALS_PRESENT;
+ else
+ pReq->u32Flags &= ~VMMDEV_CREDENTIALS_PRESENT;
+ }
+
+ /* does the guest want to read logon credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_READ)
+ {
+ if (pThis->pCredentials->Logon.szUserName[0])
+ strcpy(pReq->szUserName, pThis->pCredentials->Logon.szUserName);
+ if (pThis->pCredentials->Logon.szPassword[0])
+ strcpy(pReq->szPassword, pThis->pCredentials->Logon.szPassword);
+ if (pThis->pCredentials->Logon.szDomain[0])
+ strcpy(pReq->szDomain, pThis->pCredentials->Logon.szDomain);
+ if (!pThis->pCredentials->Logon.fAllowInteractiveLogon)
+ pReq->u32Flags |= VMMDEV_CREDENTIALS_NOLOCALLOGON;
+ else
+ pReq->u32Flags &= ~VMMDEV_CREDENTIALS_NOLOCALLOGON;
+ }
+
+ if (!pThis->fKeepCredentials)
+ {
+ /* does the caller want us to destroy the logon credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_CLEAR)
+ {
+ memset(pThis->pCredentials->Logon.szUserName, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Logon.szPassword, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Logon.szDomain, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ }
+ }
+
+ /* does the guest want to read credentials for verification? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_READJUDGE)
+ {
+ if (pThis->pCredentials->Judge.szUserName[0])
+ strcpy(pReq->szUserName, pThis->pCredentials->Judge.szUserName);
+ if (pThis->pCredentials->Judge.szPassword[0])
+ strcpy(pReq->szPassword, pThis->pCredentials->Judge.szPassword);
+ if (pThis->pCredentials->Judge.szDomain[0])
+ strcpy(pReq->szDomain, pThis->pCredentials->Judge.szDomain);
+ }
+
+ /* does the caller want us to destroy the judgement credentials? */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_CLEARJUDGE)
+ {
+ memset(pThis->pCredentials->Judge.szUserName, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Judge.szPassword, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Judge.szDomain, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_ReportCredentialsJudgement.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_ReportCredentialsJudgement(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCredentials *pReq = (VMMDevCredentials *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /* what does the guest think about the credentials? (note: the order is important here!) */
+ if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_DENY)
+ pThis->pDrv->pfnSetCredentialsJudgementResult(pThis->pDrv, VMMDEV_CREDENTIALS_JUDGE_DENY);
+ else if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT)
+ pThis->pDrv->pfnSetCredentialsJudgementResult(pThis->pDrv, VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT);
+ else if (pReq->u32Flags & VMMDEV_CREDENTIALS_JUDGE_OK)
+ pThis->pDrv->pfnSetCredentialsJudgementResult(pThis->pDrv, VMMDEV_CREDENTIALS_JUDGE_OK);
+ else
+ {
+ Log(("VMMDevReq_ReportCredentialsJudgement: invalid flags: %d!!!\n", pReq->u32Flags));
+ /** @todo why don't we return VERR_INVALID_PARAMETER to the guest? */
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetHostVersion.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pReqHdr The header of the request to handle.
+ * @since 3.1.0
+ * @note The ring-0 VBoxGuestLib uses this to check whether
+ * VMMDevHGCMParmType_PageList is supported.
+ */
+static int vmmdevReqHandler_GetHostVersion(VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqHostVersion *pReq = (VMMDevReqHostVersion *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->major = RTBldCfgVersionMajor();
+ pReq->minor = RTBldCfgVersionMinor();
+ pReq->build = RTBldCfgVersionBuild();
+ pReq->revision = RTBldCfgRevision();
+ pReq->features = VMMDEV_HVF_HGCM_PHYS_PAGE_LIST
+ | VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ | VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST
+ | VMMDEV_HVF_FAST_IRQ_ACK;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_GetCpuHotPlugRequest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetCpuHotPlugRequest(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevGetCpuHotPlugRequest *pReq = (VMMDevGetCpuHotPlugRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->enmEventType = pThis->enmCpuHotPlugEvent;
+ pReq->idCpuCore = pThis->idCpuCore;
+ pReq->idCpuPackage = pThis->idCpuPackage;
+
+ /* Clear the event */
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_None;
+ pThis->idCpuCore = UINT32_MAX;
+ pThis->idCpuPackage = UINT32_MAX;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_SetCpuHotPlugStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_SetCpuHotPlugStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevCpuHotPlugStatusRequest *pReq = (VMMDevCpuHotPlugStatusRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ if (pReq->enmStatusType == VMMDevCpuStatusType_Disable)
+ pThis->fCpuHotPlugEventsEnabled = false;
+ else if (pReq->enmStatusType == VMMDevCpuStatusType_Enable)
+ pThis->fCpuHotPlugEventsEnabled = true;
+ else
+ return VERR_INVALID_PARAMETER;
+ return VINF_SUCCESS;
+}
+
+
+#ifdef DEBUG
+/**
+ * Handles VMMDevReq_LogString.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_LogString(VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqLogString *pReq = (VMMDevReqLogString *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->szString[pReq->header.size - RT_UOFFSETOF(VMMDevReqLogString, szString) - 1] == '\0',
+ ("not null terminated\n"), VERR_INVALID_PARAMETER);
+
+ LogIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR, ("DEBUG LOG: %s", pReq->szString));
+ return VINF_SUCCESS;
+}
+#endif /* DEBUG */
+
+/**
+ * Handles VMMDevReq_GetSessionId.
+ *
+ * Get a unique "session" ID for this VM, where the ID will be different after each
+ * start, reset or restore of the VM. This can be used for restore detection
+ * inside the guest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetSessionId(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqSessionId *pReq = (VMMDevReqSessionId *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->idSession = pThis->idSession;
+ return VINF_SUCCESS;
+}
+
+
+#ifdef VBOX_WITH_PAGE_SHARING
+
+/**
+ * Handles VMMDevReq_RegisterSharedModule.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_RegisterSharedModule(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Basic input validation (more done by GMM).
+ */
+ VMMDevSharedModuleRegistrationRequest *pReq = (VMMDevSharedModuleRegistrationRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size >= sizeof(VMMDevSharedModuleRegistrationRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->header.size == RT_UOFFSETOF_DYN(VMMDevSharedModuleRegistrationRequest, aRegions[pReq->cRegions]),
+ ("%u cRegions=%u\n", pReq->header.size, pReq->cRegions), VERR_INVALID_PARAMETER);
+
+ AssertReturn(RTStrEnd(pReq->szName, sizeof(pReq->szName)), VERR_INVALID_PARAMETER);
+ AssertReturn(RTStrEnd(pReq->szVersion, sizeof(pReq->szVersion)), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pReq->szName);
+ AssertRCReturn(rc, rc);
+ rc = RTStrValidateEncoding(pReq->szVersion);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Forward the request to the VMM.
+ */
+ return PGMR3SharedModuleRegister(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->enmGuestOS, pReq->szName, pReq->szVersion,
+ pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
+}
+
+/**
+ * Handles VMMDevReq_UnregisterSharedModule.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_UnregisterSharedModule(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ /*
+ * Basic input validation.
+ */
+ VMMDevSharedModuleUnregistrationRequest *pReq = (VMMDevSharedModuleUnregistrationRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevSharedModuleUnregistrationRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ AssertReturn(RTStrEnd(pReq->szName, sizeof(pReq->szName)), VERR_INVALID_PARAMETER);
+ AssertReturn(RTStrEnd(pReq->szVersion, sizeof(pReq->szVersion)), VERR_INVALID_PARAMETER);
+ int rc = RTStrValidateEncoding(pReq->szName);
+ AssertRCReturn(rc, rc);
+ rc = RTStrValidateEncoding(pReq->szVersion);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Forward the request to the VMM.
+ */
+ return PGMR3SharedModuleUnregister(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->szName, pReq->szVersion,
+ pReq->GCBaseAddr, pReq->cbModule);
+}
+
+/**
+ * Handles VMMDevReq_CheckSharedModules.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_CheckSharedModules(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevSharedModuleCheckRequest *pReq = (VMMDevSharedModuleCheckRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevSharedModuleCheckRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+ return PGMR3SharedModuleCheckAll(PDMDevHlpGetVM(pThis->pDevInsR3));
+}
+
+/**
+ * Handles VMMDevReq_GetPageSharingStatus.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_GetPageSharingStatus(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPageSharingStatusRequest *pReq = (VMMDevPageSharingStatusRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevPageSharingStatusRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ pReq->fEnabled = false;
+ int rc = pThis->pDrv->pfnIsPageFusionEnabled(pThis->pDrv, &pReq->fEnabled);
+ if (RT_FAILURE(rc))
+ pReq->fEnabled = false;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Handles VMMDevReq_DebugIsPageShared.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr The header of the request to handle.
+ */
+static int vmmdevReqHandler_DebugIsPageShared(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevPageIsSharedRequest *pReq = (VMMDevPageIsSharedRequest *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevPageIsSharedRequest),
+ ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+# ifdef DEBUG
+ return PGMR3SharedModuleGetPageState(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->GCPtrPage, &pReq->fShared, &pReq->uPageFlags);
+# else
+ RT_NOREF1(pThis);
+ return VERR_NOT_IMPLEMENTED;
+# endif
+}
+
+#endif /* VBOX_WITH_PAGE_SHARING */
+
+
+/**
+ * Handles VMMDevReq_WriteCoreDump.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pReqHdr Pointer to the request header.
+ */
+static int vmmdevReqHandler_WriteCoreDump(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr)
+{
+ VMMDevReqWriteCoreDump *pReq = (VMMDevReqWriteCoreDump *)pReqHdr;
+ AssertMsgReturn(pReq->header.size == sizeof(VMMDevReqWriteCoreDump), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+
+ /*
+ * Only available if explicitly enabled by the user.
+ */
+ if (!pThis->fGuestCoreDumpEnabled)
+ return VERR_ACCESS_DENIED;
+
+ /*
+ * User makes sure the directory exists before composing the path.
+ */
+ if (!RTDirExists(pThis->szGuestCoreDumpDir))
+ return VERR_PATH_NOT_FOUND;
+
+ char szCorePath[RTPATH_MAX];
+ RTStrCopy(szCorePath, sizeof(szCorePath), pThis->szGuestCoreDumpDir);
+ RTPathAppend(szCorePath, sizeof(szCorePath), "VBox.core");
+
+ /*
+ * Rotate existing cores based on number of additional cores to keep around.
+ */
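+    /* E.g. with cGuestCoreDumps=2 this moves VBox.core.1 -> VBox.core.2 and then
+       VBox.core -> VBox.core.1, freeing the base name for the new dump below. */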
+ if (pThis->cGuestCoreDumps > 0)
+ for (int64_t i = pThis->cGuestCoreDumps - 1; i >= 0; i--)
+ {
+ char szFilePathOld[RTPATH_MAX];
+ if (i == 0)
+ RTStrCopy(szFilePathOld, sizeof(szFilePathOld), szCorePath);
+ else
+ RTStrPrintf(szFilePathOld, sizeof(szFilePathOld), "%s.%lld", szCorePath, i);
+
+ char szFilePathNew[RTPATH_MAX];
+ RTStrPrintf(szFilePathNew, sizeof(szFilePathNew), "%s.%lld", szCorePath, i + 1);
+ int vrc = RTFileMove(szFilePathOld, szFilePathNew, RTFILEMOVE_FLAGS_REPLACE);
+ if (vrc == VERR_FILE_NOT_FOUND)
+ RTFileDelete(szFilePathNew);
+ }
+
+ /*
+ * Write the core file.
+ */
+ PUVM pUVM = PDMDevHlpGetUVM(pThis->pDevInsR3);
+ return DBGFR3CoreWrite(pUVM, szCorePath, true /*fReplaceFile*/);
+}
+
+
+/**
+ * Sets request status to VINF_HGCM_ASYNC_EXECUTE.
+ *
+ * @param pThis The VMM device instance data.
+ * @param GCPhysReqHdr The guest physical address of the request.
+ * @param pLock Pointer to the request locking info. NULL if not
+ * locked.
+ */
+DECLINLINE(void) vmmdevReqHdrSetHgcmAsyncExecute(PVMMDEV pThis, RTGCPHYS GCPhysReqHdr, PVMMDEVREQLOCK pLock)
+{
+ if (pLock)
+ ((VMMDevRequestHeader volatile *)pLock->pvReq)->rc = VINF_HGCM_ASYNC_EXECUTE;
+ else
+ {
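+        /* No mapping of the request available; update only the rc field directly
+           in guest physical memory. */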
+ int32_t rcReq = VINF_HGCM_ASYNC_EXECUTE;
+ PDMDevHlpPhysWrite(pThis->pDevInsR3, GCPhysReqHdr + RT_UOFFSETOF(VMMDevRequestHeader, rc), &rcReq, sizeof(rcReq));
+ }
+}
+
+
+/** @name VMMDEVREQDISP_POST_F_XXX - post dispatcher optimizations.
+ * @{ */
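+/** Indicates that the request is being handled asynchronously (HGCM); the caller
+ *  must not write the request buffer back to guest memory. */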
+#define VMMDEVREQDISP_POST_F_NO_WRITE_OUT RT_BIT_32(0)
+/** @} */
+
+
+/**
+ * Dispatch the request to the appropriate handler function.
+ *
+ * @returns Port I/O handler exit code.
+ * @param pThis The VMM device instance data.
+ * @param pReqHdr The request header (cached in host memory).
+ * @param GCPhysReqHdr The guest physical address of the request (for
+ * HGCM).
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param pfPostOptimize HGCM optimizations, VMMDEVREQDISP_POST_F_XXX.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+static int vmmdevReqDispatcher(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr,
+ uint64_t tsArrival, uint32_t *pfPostOptimize, PVMMDEVREQLOCK *ppLock)
+{
+ int rcRet = VINF_SUCCESS;
+ Assert(*pfPostOptimize == 0);
+
+ switch (pReqHdr->requestType)
+ {
+ case VMMDevReq_ReportGuestInfo:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestInfo(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestInfo2:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestInfo2(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestStatus:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestStatus(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestUserState:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestUserState(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestCapabilities:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestCapabilities(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetGuestCapabilities:
+ pReqHdr->rc = vmmdevReqHandler_SetGuestCapabilities(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_WriteCoreDump:
+ pReqHdr->rc = vmmdevReqHandler_WriteCoreDump(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetMouseStatus:
+ pReqHdr->rc = vmmdevReqHandler_GetMouseStatus(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetMouseStatus:
+ pReqHdr->rc = vmmdevReqHandler_SetMouseStatus(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetPointerShape:
+ pReqHdr->rc = vmmdevReqHandler_SetPointerShape(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHostTime:
+ pReqHdr->rc = vmmdevReqHandler_GetHostTime(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHypervisorInfo:
+ pReqHdr->rc = vmmdevReqHandler_GetHypervisorInfo(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetHypervisorInfo:
+ pReqHdr->rc = vmmdevReqHandler_SetHypervisorInfo(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_RegisterPatchMemory:
+ pReqHdr->rc = vmmdevReqHandler_RegisterPatchMemory(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_DeregisterPatchMemory:
+ pReqHdr->rc = vmmdevReqHandler_DeregisterPatchMemory(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetPowerStatus:
+ {
+ int rc = pReqHdr->rc = vmmdevReqHandler_SetPowerStatus(pThis, pReqHdr);
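+            /* Pass informational status codes other than VINF_SUCCESS back to the
+               I/O port handler so the caller can act on them. */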
+ if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
+ rcRet = rc;
+ break;
+ }
+
+ case VMMDevReq_GetDisplayChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequest2:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequest2(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequestEx:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequestEx(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetDisplayChangeRequestMulti:
+ pReqHdr->rc = vmmdevReqHandler_GetDisplayChangeRequestMulti(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoModeSupported:
+ pReqHdr->rc = vmmdevReqHandler_VideoModeSupported(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoModeSupported2:
+ pReqHdr->rc = vmmdevReqHandler_VideoModeSupported2(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHeightReduction:
+ pReqHdr->rc = vmmdevReqHandler_GetHeightReduction(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_AcknowledgeEvents:
+ pReqHdr->rc = vmmdevReqHandler_AcknowledgeEvents(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_CtlGuestFilterMask:
+ pReqHdr->rc = vmmdevReqHandler_CtlGuestFilterMask(pThis, pReqHdr);
+ break;
+
+#ifdef VBOX_WITH_HGCM
+ case VMMDevReq_HGCMConnect:
+ vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMConnect(pThis, pReqHdr, GCPhysReqHdr);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+ case VMMDevReq_HGCMDisconnect:
+ vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMDisconnect(pThis, pReqHdr, GCPhysReqHdr);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+# ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall32:
+ case VMMDevReq_HGCMCall64:
+# else
+ case VMMDevReq_HGCMCall:
+# endif /* VBOX_WITH_64_BITS_GUESTS */
+ vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
+ pReqHdr->rc = vmmdevReqHandler_HGCMCall(pThis, pReqHdr, GCPhysReqHdr, tsArrival, ppLock);
+ Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+ if (RT_SUCCESS(pReqHdr->rc))
+ *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
+ break;
+
+ case VMMDevReq_HGCMCancel:
+ pReqHdr->rc = vmmdevReqHandler_HGCMCancel(pThis, pReqHdr, GCPhysReqHdr);
+ break;
+
+ case VMMDevReq_HGCMCancel2:
+ pReqHdr->rc = vmmdevReqHandler_HGCMCancel2(pThis, pReqHdr);
+ break;
+#endif /* VBOX_WITH_HGCM */
+
+ case VMMDevReq_VideoAccelEnable:
+ pReqHdr->rc = vmmdevReqHandler_VideoAccelEnable(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoAccelFlush:
+ pReqHdr->rc = vmmdevReqHandler_VideoAccelFlush(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_VideoSetVisibleRegion:
+ pReqHdr->rc = vmmdevReqHandler_VideoSetVisibleRegion(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetSeamlessChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetSeamlessChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetVRDPChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetVRDPChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetMemBalloonChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetMemBalloonChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ChangeMemBalloon:
+ pReqHdr->rc = vmmdevReqHandler_ChangeMemBalloon(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetStatisticsChangeRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetStatisticsChangeRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportGuestStats:
+ pReqHdr->rc = vmmdevReqHandler_ReportGuestStats(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_QueryCredentials:
+ pReqHdr->rc = vmmdevReqHandler_QueryCredentials(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_ReportCredentialsJudgement:
+ pReqHdr->rc = vmmdevReqHandler_ReportCredentialsJudgement(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetHostVersion:
+ pReqHdr->rc = vmmdevReqHandler_GetHostVersion(pReqHdr);
+ break;
+
+ case VMMDevReq_GetCpuHotPlugRequest:
+ pReqHdr->rc = vmmdevReqHandler_GetCpuHotPlugRequest(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_SetCpuHotPlugStatus:
+ pReqHdr->rc = vmmdevReqHandler_SetCpuHotPlugStatus(pThis, pReqHdr);
+ break;
+
+#ifdef VBOX_WITH_PAGE_SHARING
+ case VMMDevReq_RegisterSharedModule:
+ pReqHdr->rc = vmmdevReqHandler_RegisterSharedModule(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_UnregisterSharedModule:
+ pReqHdr->rc = vmmdevReqHandler_UnregisterSharedModule(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_CheckSharedModules:
+ pReqHdr->rc = vmmdevReqHandler_CheckSharedModules(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_GetPageSharingStatus:
+ pReqHdr->rc = vmmdevReqHandler_GetPageSharingStatus(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_DebugIsPageShared:
+ pReqHdr->rc = vmmdevReqHandler_DebugIsPageShared(pThis, pReqHdr);
+ break;
+
+#endif /* VBOX_WITH_PAGE_SHARING */
+
+#ifdef DEBUG
+ case VMMDevReq_LogString:
+ pReqHdr->rc = vmmdevReqHandler_LogString(pReqHdr);
+ break;
+#endif
+
+ case VMMDevReq_GetSessionId:
+ pReqHdr->rc = vmmdevReqHandler_GetSessionId(pThis, pReqHdr);
+ break;
+
+ /*
+ * Guest wants to give up a timeslice.
+ * Note! This was only ever used by experimental GAs!
+ */
+ /** @todo maybe we could just remove this? */
+ case VMMDevReq_Idle:
+ {
+ /* just return to EMT telling it that we want to halt */
+ rcRet = VINF_EM_HALT;
+ break;
+ }
+
+ case VMMDevReq_GuestHeartbeat:
+ pReqHdr->rc = vmmDevReqHandler_GuestHeartbeat(pThis);
+ break;
+
+ case VMMDevReq_HeartbeatConfigure:
+ pReqHdr->rc = vmmDevReqHandler_HeartbeatConfigure(pThis, pReqHdr);
+ break;
+
+ case VMMDevReq_NtBugCheck:
+ pReqHdr->rc = vmmDevReqHandler_NtBugCheck(pThis, pReqHdr);
+ break;
+
+ default:
+ {
+ pReqHdr->rc = VERR_NOT_IMPLEMENTED;
+ Log(("VMMDev unknown request type %d\n", pReqHdr->requestType));
+ break;
+ }
+ }
+ return rcRet;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT,
+ *      Port I/O write handler for the generic request interface.}
+ */
+static DECLCALLBACK(int) vmmdevRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+ uint64_t tsArrival;
+ STAM_GET_TS(tsArrival);
+
+ RT_NOREF2(Port, cb);
+ PVMMDEV pThis = (VMMDevState *)pvUser;
+
+ /*
+ * The caller has passed the guest context physical address of the request
+ * structure. We'll copy all of it into a heap buffer eventually, but we
+ * will have to start off with the header.
+ */
+ VMMDevRequestHeader requestHeader;
+ RT_ZERO(requestHeader);
+ PDMDevHlpPhysRead(pDevIns, (RTGCPHYS)u32, &requestHeader, sizeof(requestHeader));
+
+ /* The structure size must be greater or equal to the header size. */
+ if (requestHeader.size < sizeof(VMMDevRequestHeader))
+ {
+ Log(("VMMDev request header size too small! size = %d\n", requestHeader.size));
+ return VINF_SUCCESS;
+ }
+
+ /* Check the version of the header structure. */
+ if (requestHeader.version != VMMDEV_REQUEST_HEADER_VERSION)
+ {
+ Log(("VMMDev: guest header version (0x%08X) differs from ours (0x%08X)\n", requestHeader.version, VMMDEV_REQUEST_HEADER_VERSION));
+ return VINF_SUCCESS;
+ }
+
+ Log2(("VMMDev request issued: %d\n", requestHeader.requestType));
+
+ int rcRet = VINF_SUCCESS;
+    /* Check that it doesn't exceed the max packet size. */
+ if (requestHeader.size <= VMMDEV_MAX_VMMDEVREQ_SIZE)
+ {
+ /*
+         * We require the GAs to report their information before we let them have
+         * access to all the functions. The VMMDevReq_ReportGuestInfo request
+         * is the one which unlocks the access. Newer additions will first
+         * issue VMMDevReq_ReportGuestInfo2; older ones don't know about it.
+ * Two exceptions: VMMDevReq_GetHostVersion and VMMDevReq_WriteCoreDump.
+ */
+ if ( pThis->fu32AdditionsOk
+ || requestHeader.requestType == VMMDevReq_ReportGuestInfo2
+ || requestHeader.requestType == VMMDevReq_ReportGuestInfo
+ || requestHeader.requestType == VMMDevReq_WriteCoreDump
+ || requestHeader.requestType == VMMDevReq_GetHostVersion
+ )
+ {
+ /*
+ * The request looks fine. Copy it into a buffer.
+ *
+ * The buffer is only used while on this thread, and this thread is one
+ * of the EMTs, so we keep a 4KB buffer for each EMT around to avoid
+             * wasting time with the heap. Larger allocations go to the heap, though.
+ */
+ VMCPUID iCpu = PDMDevHlpGetCurrentCpuId(pDevIns);
+ VMMDevRequestHeader *pRequestHeaderFree = NULL;
+ VMMDevRequestHeader *pRequestHeader = NULL;
+ if ( requestHeader.size <= _4K
+ && iCpu < RT_ELEMENTS(pThis->apReqBufs))
+ {
+ pRequestHeader = pThis->apReqBufs[iCpu];
+ if (pRequestHeader)
+ { /* likely */ }
+ else
+ pThis->apReqBufs[iCpu] = pRequestHeader = (VMMDevRequestHeader *)RTMemPageAlloc(_4K);
+ }
+ else
+ {
+ Assert(iCpu != NIL_VMCPUID);
+ STAM_REL_COUNTER_INC(&pThis->StatReqBufAllocs);
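+                /* Large request or no per-EMT buffer available; fall back to a heap
+                   allocation of at least 512 bytes. */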
+ pRequestHeaderFree = pRequestHeader = (VMMDevRequestHeader *)RTMemAlloc(RT_MAX(requestHeader.size, 512));
+ }
+ if (pRequestHeader)
+ {
+ memcpy(pRequestHeader, &requestHeader, sizeof(VMMDevRequestHeader));
+
+                /* Try to lock the request if it's an HGCM call that doesn't cross a page boundary.
+ Saves on PGM interaction. */
+ VMMDEVREQLOCK Lock = { NULL, { 0, NULL } };
+ PVMMDEVREQLOCK pLock = NULL;
+ size_t cbLeft = requestHeader.size - sizeof(VMMDevRequestHeader);
+ if (cbLeft)
+ {
+ if ( ( requestHeader.requestType == VMMDevReq_HGCMCall32
+ || requestHeader.requestType == VMMDevReq_HGCMCall64)
+ && ((u32 + requestHeader.size) >> X86_PAGE_SHIFT) == (u32 >> X86_PAGE_SHIFT)
+ && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, u32, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
+ {
+ memcpy((uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+ (uint8_t *)Lock.pvReq + sizeof(VMMDevRequestHeader), cbLeft);
+ pLock = &Lock;
+ }
+ else
+ PDMDevHlpPhysRead(pDevIns,
+ (RTGCPHYS)u32 + sizeof(VMMDevRequestHeader),
+ (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+ cbLeft);
+ }
+
+ /*
+                 * Feed the buffered request through the dispatcher.
+ */
+ uint32_t fPostOptimize = 0;
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+ rcRet = vmmdevReqDispatcher(pThis, pRequestHeader, u32, tsArrival, &fPostOptimize, &pLock);
+ PDMCritSectLeave(&pThis->CritSect);
+
+ /*
+ * Write the result back to guest memory (unless it is a locked HGCM call).
+ */
+ if (!(fPostOptimize & VMMDEVREQDISP_POST_F_NO_WRITE_OUT))
+ {
+ if (pLock)
+ memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
+ else
+ PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
+ }
+
+ if (!pRequestHeaderFree)
+ { /* likely */ }
+ else
+ RTMemFree(pRequestHeaderFree);
+ return rcRet;
+ }
+
+ Log(("VMMDev: RTMemAlloc failed!\n"));
+ requestHeader.rc = VERR_NO_MEMORY;
+ }
+ else
+ {
+ LogRelMax(10, ("VMMDev: Guest has not yet reported to us -- refusing operation of request #%d\n",
+ requestHeader.requestType));
+ requestHeader.rc = VERR_NOT_SUPPORTED;
+ }
+ }
+ else
+ {
+ LogRelMax(50, ("VMMDev: Request packet too big (%x), refusing operation\n", requestHeader.size));
+ requestHeader.rc = VERR_NOT_SUPPORTED;
+ }
+
+ /*
+ * Write the result back to guest memory.
+ */
+ PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
+
+ return rcRet;
+}
+
+#endif /* IN_RING3 */
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT, Port I/O write handler for requests
+ * that can be handled w/o going to ring-3.}
+ */
+PDMBOTHCBDECL(int) vmmdevFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+#ifndef IN_RING3
+# if 0 /* This functionality is offered through reading the port (vmmdevFastRequestIrqAck). Leaving it here for later. */
+ PVMMDEV pThis = (VMMDevState *)pvUser;
+ Assert(PDMINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+ RT_NOREF2(Port, cb);
+
+ /*
+ * We only process a limited set of requests here, reflecting the rest down
+ * to ring-3. So, try read the whole request into a stack buffer and check
+ * if we can handle it.
+ */
+ union
+ {
+ VMMDevRequestHeader Hdr;
+ VMMDevEvents Ack;
+ } uReq;
+ RT_ZERO(uReq);
+
+ VBOXSTRICTRC rcStrict;
+ if (pThis->fu32AdditionsOk)
+ {
+ /* Read it into memory. */
+ uint32_t cbToRead = sizeof(uReq); /* (Adjust to stay within a page if we support more than ack requests.) */
+ rcStrict = PDMDevHlpPhysRead(pDevIns, u32, &uReq, cbToRead);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ /*
+ * Validate the request and check that we want to handle it here.
+ */
+ if ( uReq.Hdr.size >= sizeof(uReq.Hdr)
+ && uReq.Hdr.version == VMMDEV_REQUEST_HEADER_VERSION
+ && ( uReq.Hdr.requestType == VMMDevReq_AcknowledgeEvents
+ && uReq.Hdr.size == sizeof(uReq.Ack)
+ && cbToRead == sizeof(uReq.Ack)
+ && pThis->CTX_SUFF(pVMMDevRAM) != NULL)
+ )
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Try grab the critical section.
+ */
+ int rc2 = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_IOPORT_WRITE);
+ if (rc2 == VINF_SUCCESS)
+ {
+ /*
+ * Handle the request and write back the result to the guest.
+ */
+ uReq.Hdr.rc = vmmdevReqHandler_AcknowledgeEvents(pThis, &uReq.Hdr);
+
+ rcStrict = PDMDevHlpPhysWrite(pDevIns, u32, &uReq, uReq.Hdr.size);
+ PDMCritSectLeave(&pThis->CritSect);
+ if (rcStrict == VINF_SUCCESS)
+ { /* likely */ }
+ else
+ Log(("vmmdevFastRequestHandler: PDMDevHlpPhysWrite(%#RX32+rc,4) -> %Rrc (%RTbool)\n",
+ u32, VBOXSTRICTRC_VAL(rcStrict), PGM_PHYS_RW_IS_SUCCESS(rcStrict) ));
+ }
+ else
+ {
+ Log(("vmmdevFastRequestHandler: PDMCritSectEnter -> %Rrc\n", rc2));
+ rcStrict = rc2;
+ }
+ }
+ else
+ {
+ Log(("vmmdevFastRequestHandler: size=%#x version=%#x requestType=%d (pVMMDevRAM=%p) -> R3\n",
+ uReq.Hdr.size, uReq.Hdr.version, uReq.Hdr.requestType, pThis->CTX_SUFF(pVMMDevRAM) ));
+ rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+ }
+ }
+ else
+ Log(("vmmdevFastRequestHandler: PDMDevHlpPhysRead(%#RX32,%#RX32) -> %Rrc\n", u32, cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
+ }
+ else
+ {
+        Log(("vmmdevFastRequestHandler: additions not okay\n"));
+ rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+ }
+
+ return VBOXSTRICTRC_VAL(rcStrict);
+# else
+ RT_NOREF(pDevIns, pvUser, Port, u32, cb);
+ return VINF_IOM_R3_IOPORT_WRITE;
+# endif
+
+#else /* IN_RING3 */
+ return vmmdevRequestHandler(pDevIns, pvUser, Port, u32, cb);
+#endif /* IN_RING3 */
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTIN,
+ * Port I/O read handler for IRQ acknowledging and getting pending events (same
+ * as VMMDevReq_AcknowledgeEvents - just faster).}
+ */
+PDMBOTHCBDECL(int) vmmdevFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t *pu32, unsigned cb)
+{
+ PVMMDEV pThis = (VMMDevState *)pvUser;
+ Assert(PDMINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+ RT_NOREF(Port);
+
+ /* Only 32-bit accesses. */
+ ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VERR_IOM_IOPORT_UNUSED);
+
+ /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
+ VBOXSTRICTRC rcStrict;
+#ifndef IN_RING3
+ if (pThis->CTX_SUFF(pVMMDevRAM) != NULL)
+#endif
+ {
+        /* Enter the critical section and check that the additions have been properly
+ initialized and that we're not in legacy v1.3 device mode. */
+ rcStrict = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
+ if (rcStrict == VINF_SUCCESS)
+ {
+ if ( pThis->fu32AdditionsOk
+ && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+ {
+ /*
+ * Do the job.
+ *
+ * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
+ */
+ STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
+
+ if (pThis->fNewGuestFilterMask)
+ {
+ pThis->fNewGuestFilterMask = false;
+ pThis->u32GuestFilterMask = pThis->u32NewGuestFilterMask;
+ }
+
+ *pu32 = pThis->u32HostEventFlags & pThis->u32GuestFilterMask;
+
+ pThis->u32HostEventFlags &= ~pThis->u32GuestFilterMask;
+ pThis->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+ PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+ }
+ else
+ {
+ Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
+ pThis->guestInfo.interfaceVersion));
+ *pu32 = UINT32_MAX;
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ }
+ }
+#ifndef IN_RING3
+ else
+ rcStrict = VINF_IOM_R3_IOPORT_READ;
+#endif
+ return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+
+#ifdef IN_RING3
+
+/* -=-=-=-=-=- PCI Device -=-=-=-=-=- */
+
+
+/**
+ * @callback_method_impl{FNPCIIOREGIONMAP,MMIO/MMIO2 regions}
+ */
+static DECLCALLBACK(int) vmmdevIORAMRegionMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
+{
+ RT_NOREF1(cb);
+    LogFlow(("vmmdevIORAMRegionMap: iRegion=%d GCPhysAddress=%RGp cb=%RGp enmType=%d\n", iRegion, GCPhysAddress, cb, enmType));
+ PVMMDEV pThis = RT_FROM_MEMBER(pPciDev, VMMDEV, PciDev);
+ int rc;
+
+ if (iRegion == 1)
+ {
+ AssertReturn(enmType == PCI_ADDRESS_SPACE_MEM, VERR_INTERNAL_ERROR);
+ Assert(pThis->pVMMDevRAMR3 != NULL);
+ if (GCPhysAddress != NIL_RTGCPHYS)
+ {
+ /*
+ * Map the MMIO2 memory.
+ */
+ pThis->GCPhysVMMDevRAM = GCPhysAddress;
+ Assert(pThis->GCPhysVMMDevRAM == GCPhysAddress);
+ rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
+ }
+ else
+ {
+ /*
+ * It is about to be unmapped, just clean up.
+ */
+ pThis->GCPhysVMMDevRAM = NIL_RTGCPHYS32;
+ rc = VINF_SUCCESS;
+ }
+ }
+ else if (iRegion == 2)
+ {
+ AssertReturn(enmType == PCI_ADDRESS_SPACE_MEM_PREFETCH, VERR_INTERNAL_ERROR);
+ Assert(pThis->pVMMDevHeapR3 != NULL);
+ if (GCPhysAddress != NIL_RTGCPHYS)
+ {
+ /*
+ * Map the MMIO2 memory.
+ */
+ pThis->GCPhysVMMDevHeap = GCPhysAddress;
+ Assert(pThis->GCPhysVMMDevHeap == GCPhysAddress);
+ rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
+ if (RT_SUCCESS(rc))
+ rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, GCPhysAddress, pThis->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ }
+ else
+ {
+ /*
+ * It is about to be unmapped, just clean up.
+ */
+ PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThis->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ pThis->GCPhysVMMDevHeap = NIL_RTGCPHYS32;
+ rc = VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ AssertMsgFailed(("%d\n", iRegion));
+ rc = VERR_INVALID_PARAMETER;
+ }
+
+ return rc;
+}
+
+
+/**
+ * @callback_method_impl{FNPCIIOREGIONMAP,I/O Port Region}
+ */
+static DECLCALLBACK(int) vmmdevIOPortRegionMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
+ RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
+{
+ LogFlow(("vmmdevIOPortRegionMap: iRegion=%d GCPhysAddress=%RGp cb=%RGp enmType=%d\n", iRegion, GCPhysAddress, cb, enmType));
+ RT_NOREF3(iRegion, cb, enmType);
+ PVMMDEV pThis = RT_FROM_MEMBER(pPciDev, VMMDEV, PciDev);
+
+ Assert(enmType == PCI_ADDRESS_SPACE_IO);
+ Assert(iRegion == 0);
+ AssertMsg(RT_ALIGN(GCPhysAddress, 8) == GCPhysAddress, ("Expected 8 byte alignment. GCPhysAddress=%#x\n", GCPhysAddress));
+
+ /*
+ * Register our port IO handlers.
+ */
+ int rc = PDMDevHlpIOPortRegister(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST, 1,
+ pThis, vmmdevRequestHandler, NULL, NULL, NULL, "VMMDev Request Handler");
+ AssertLogRelRCReturn(rc, rc);
+
+ /* The fast one: */
+ rc = PDMDevHlpIOPortRegister(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+ pThis, vmmdevFastRequestHandler, vmmdevFastRequestIrqAck, NULL, NULL, "VMMDev Fast R0/RC Requests");
+ AssertLogRelRCReturn(rc, rc);
+ if (pThis->fRZEnabled)
+ {
+ rc = PDMDevHlpIOPortRegisterR0(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+ PDMINS_2_DATA_R0PTR(pDevIns), "vmmdevFastRequestHandler", "vmmdevFastRequestIrqAck",
+ NULL, NULL, "VMMDev Fast R0/RC Requests");
+ AssertLogRelRCReturn(rc, rc);
+ rc = PDMDevHlpIOPortRegisterRC(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+ PDMINS_2_DATA_RCPTR(pDevIns), "vmmdevFastRequestHandler", "vmmdevFastRequestIrqAck",
+ NULL, NULL, "VMMDev Fast R0/RC Requests");
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+ return rc;
+}
+
+
+
+/* -=-=-=-=-=- Backdoor Logging and Time Sync. -=-=-=-=-=- */
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT, Backdoor Logging.}
+ */
+static DECLCALLBACK(int) vmmdevBackdoorLog(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+ RT_NOREF1(pvUser);
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+
+ if (!pThis->fBackdoorLogDisabled && cb == 1 && Port == RTLOG_DEBUG_PORT)
+ {
+
+ /* The raw version. */
+ switch (u32)
+ {
+ case '\r': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <return>\n")); break;
+ case '\n': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <newline>\n")); break;
+ case '\t': LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: <tab>\n")); break;
+ default: LogIt(RTLOGGRPFLAGS_LEVEL_2, LOG_GROUP_DEV_VMM_BACKDOOR, ("vmmdev: %c (%02x)\n", u32, u32)); break;
+ }
+
+ /* The readable, buffered version. */
+ if (u32 == '\n' || u32 == '\r')
+ {
+ pThis->szMsg[pThis->iMsg] = '\0';
+ if (pThis->iMsg)
+ LogRelIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR, ("VMMDev: Guest Log: %s\n", pThis->szMsg));
+ pThis->iMsg = 0;
+ }
+ else
+ {
+ if (pThis->iMsg >= sizeof(pThis->szMsg)-1)
+ {
+ pThis->szMsg[pThis->iMsg] = '\0';
+ LogRelIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_DEV_VMM_BACKDOOR, ("VMMDev: Guest Log: %s\n", pThis->szMsg));
+ pThis->iMsg = 0;
+ }
+ pThis->szMsg[pThis->iMsg] = (char)u32;
+ pThis->szMsg[++pThis->iMsg] = '\0';
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT, Alternative time synchronization.}
+ */
+static DECLCALLBACK(int) vmmdevAltTimeSyncWrite(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+ RT_NOREF2(pvUser, Port);
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ if (cb == 4)
+ {
+ /* Selects high (0) or low (1) DWORD. The high has to be read first. */
+ switch (u32)
+ {
+ case 0:
+ pThis->fTimesyncBackdoorLo = false;
+ break;
+ case 1:
+ pThis->fTimesyncBackdoorLo = true;
+ break;
+ default:
+ Log(("vmmdevAltTimeSyncWrite: Invalid access cb=%#x u32=%#x\n", cb, u32));
+ break;
+ }
+ }
+ else
+ Log(("vmmdevAltTimeSyncWrite: Invalid access cb=%#x u32=%#x\n", cb, u32));
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT, Alternative time synchronization.}
+ */
+static DECLCALLBACK(int) vmmdevAltTimeSyncRead(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t *pu32, unsigned cb)
+{
+ RT_NOREF2(pvUser, Port);
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ int rc;
+ if (cb == 4)
+ {
+ if (pThis->fTimesyncBackdoorLo)
+ *pu32 = (uint32_t)pThis->hostTime;
+ else
+ {
+ /* Reading the high dword gets and saves the current time. */
+ RTTIMESPEC Now;
+ pThis->hostTime = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pDevIns, &Now));
+ *pu32 = (uint32_t)(pThis->hostTime >> 32);
+ }
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ Log(("vmmdevAltTimeSyncRead: Invalid access cb=%#x\n", cb));
+ rc = VERR_IOM_IOPORT_UNUSED;
+ }
+ return rc;
+}
+
+#endif /* VMMDEV_WITH_ALT_TIMESYNC */
+
+
+/* -=-=-=-=-=- IBase -=-=-=-=-=- */
+
+/**
+ * @interface_method_impl{PDMIBASE,pfnQueryInterface}
+ */
+static DECLCALLBACK(void *) vmmdevPortQueryInterface(PPDMIBASE pInterface, const char *pszIID)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IBase);
+
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIVMMDEVPORT, &pThis->IPort);
+#ifdef VBOX_WITH_HGCM
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMIHGCMPORT, &pThis->IHGCMPort);
+#endif
+ /* Currently only for shared folders. */
+ PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->SharedFolders.ILeds);
+ return NULL;
+}
+
+
+/* -=-=-=-=-=- ILeds -=-=-=-=-=- */
+
+/**
+ * Gets the pointer to the status LED of a unit.
+ *
+ * @returns VBox status code.
+ * @param pInterface Pointer to the interface structure containing the called function pointer.
+ * @param iLUN The unit which status LED we desire.
+ * @param ppLed Where to store the LED pointer.
+ */
+static DECLCALLBACK(int) vmmdevQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, SharedFolders.ILeds);
+ if (iLUN == 0) /* LUN 0 is shared folders */
+ {
+ *ppLed = &pThis->SharedFolders.Led;
+ return VINF_SUCCESS;
+ }
+ return VERR_PDM_LUN_NOT_FOUND;
+}
+
+
+/* -=-=-=-=-=- PDMIVMMDEVPORT (VMMDEV::IPort) -=-=-=-=-=- */
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnQueryAbsoluteMouse}
+ */
+static DECLCALLBACK(int) vmmdevIPort_QueryAbsoluteMouse(PPDMIVMMDEVPORT pInterface, int32_t *pxAbs, int32_t *pyAbs)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+
+ /** @todo at the first sign of trouble in this area, just enter the critsect.
+ * As indicated by the comment below, the atomic reads serve no real purpose
+ * here since we can assume cache coherency protocols and int32_t alignment
+ * rules ensure we won't see a half-written value. */
+ if (pxAbs)
+ *pxAbs = ASMAtomicReadS32(&pThis->mouseXAbs); /* why the atomic read? */
+ if (pyAbs)
+ *pyAbs = ASMAtomicReadS32(&pThis->mouseYAbs);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetAbsoluteMouse}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetAbsoluteMouse(PPDMIVMMDEVPORT pInterface, int32_t xAbs, int32_t yAbs)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ if ( pThis->mouseXAbs != xAbs
+ || pThis->mouseYAbs != yAbs)
+ {
+ Log2(("vmmdevIPort_SetAbsoluteMouse : settings absolute position to x = %d, y = %d\n", xAbs, yAbs));
+ pThis->mouseXAbs = xAbs;
+ pThis->mouseYAbs = yAbs;
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnQueryMouseCapabilities}
+ */
+static DECLCALLBACK(int) vmmdevIPort_QueryMouseCapabilities(PPDMIVMMDEVPORT pInterface, uint32_t *pfCapabilities)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ AssertPtrReturn(pfCapabilities, VERR_INVALID_PARAMETER);
+
+ *pfCapabilities = pThis->mouseCapabilities;
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnUpdateMouseCapabilities}
+ */
+static DECLCALLBACK(int)
+vmmdevIPort_UpdateMouseCapabilities(PPDMIVMMDEVPORT pInterface, uint32_t fCapsAdded, uint32_t fCapsRemoved)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ uint32_t fOldCaps = pThis->mouseCapabilities;
+ pThis->mouseCapabilities &= ~(fCapsRemoved & VMMDEV_MOUSE_HOST_MASK);
+ pThis->mouseCapabilities |= (fCapsAdded & VMMDEV_MOUSE_HOST_MASK)
+ | VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR;
+ bool fNotify = fOldCaps != pThis->mouseCapabilities;
+
+ LogRelFlow(("VMMDev: vmmdevIPort_UpdateMouseCapabilities: fCapsAdded=0x%x, fCapsRemoved=0x%x, fNotify=%RTbool\n", fCapsAdded,
+ fCapsRemoved, fNotify));
+
+ if (fNotify)
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED);
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
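+/**
+ * Compares a new monitor definition against a previously seen one.
+ *
+ * The origin, width, height and bpp fields are only compared when the
+ * corresponding VMMDEV_DISPLAY_XXX flag is set in the new definition; the
+ * DISABLED and PRIMARY flags themselves must match.
+ *
+ * @returns true if the two definitions are considered equal, false otherwise.
+ * @param   pNew    The new display definition.
+ * @param   pOld    The previously seen display definition.
+ */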
+static bool vmmdevIsMonitorDefEqual(VMMDevDisplayDef const *pNew, VMMDevDisplayDef const *pOld)
+{
+ bool fEqual = pNew->idDisplay == pOld->idDisplay;
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN) /* No change. */
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN) /* Old value exists and */
+ && pNew->xOrigin == pOld->xOrigin /* the old is equal to the new. */
+ && pNew->yOrigin == pOld->yOrigin));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_CX)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_CX)
+ && pNew->cx == pOld->cx));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_CY)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_CY)
+ && pNew->cy == pOld->cy));
+
+ fEqual = fEqual && ( !RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_BPP)
+ || ( RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_BPP)
+ && pNew->cBitsPerPixel == pOld->cBitsPerPixel));
+
+ fEqual = fEqual && ( RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_DISABLED)
+ == RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_DISABLED));
+
+ fEqual = fEqual && ( RT_BOOL(pNew->fDisplayFlags & VMMDEV_DISPLAY_PRIMARY)
+ == RT_BOOL(pOld->fDisplayFlags & VMMDEV_DISPLAY_PRIMARY));
+
+ return fEqual;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnRequestDisplayChange}
+ */
+static DECLCALLBACK(int)
+vmmdevIPort_RequestDisplayChange(PPDMIVMMDEVPORT pInterface, uint32_t cDisplays, VMMDevDisplayDef const *paDisplays, bool fForce)
+{
+ int rc = VINF_SUCCESS;
+
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ bool fNotifyGuest = false;
+
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ uint32_t i;
+ for (i = 0; i < cDisplays; ++i)
+ {
+ VMMDevDisplayDef const *p = &paDisplays[i];
+
+ /* Either one display definition is provided or the display id must be equal to the array index. */
+ AssertBreakStmt(cDisplays == 1 || p->idDisplay == i, rc = VERR_INVALID_PARAMETER);
+ AssertBreakStmt(p->idDisplay < RT_ELEMENTS(pThis->displayChangeData.aRequests), rc = VERR_INVALID_PARAMETER);
+
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[p->idDisplay];
+
+ VMMDevDisplayDef const *pLastRead = &pRequest->lastReadDisplayChangeRequest;
+
+ /* Verify that the new resolution is different and that guest does not yet know about it. */
+ bool const fDifferentResolution = fForce || !vmmdevIsMonitorDefEqual(p, pLastRead);
+
+ LogFunc(("same=%d. New: %dx%d, cBits=%d, id=%d. Old: %dx%d, cBits=%d, id=%d. @%d,%d, Enabled=%d, ChangeOrigin=%d\n",
+ !fDifferentResolution, p->cx, p->cy, p->cBitsPerPixel, p->idDisplay,
+ pLastRead->cx, pLastRead->cy, pLastRead->cBitsPerPixel, pLastRead->idDisplay,
+ p->xOrigin, p->yOrigin,
+ !RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_DISABLED),
+ RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN)));
+
+ /* We could validate the information here but hey, the guest can do that as well! */
+ pRequest->displayChangeRequest = *p;
+ pRequest->fPending = fDifferentResolution;
+
+ fNotifyGuest = fNotifyGuest || fDifferentResolution;
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ if (fNotifyGuest)
+ {
+ for (i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); ++i)
+ {
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[i];
+ if (pRequest->fPending)
+ {
+ VMMDevDisplayDef const *p = &pRequest->displayChangeRequest;
+ LogRel(("VMMDev: SetVideoModeHint: Got a video mode hint (%dx%dx%d)@(%dx%d),(%d;%d) at %d\n",
+ p->cx, p->cy, p->cBitsPerPixel, p->xOrigin, p->yOrigin,
+ !RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_DISABLED),
+ RT_BOOL(p->fDisplayFlags & VMMDEV_DISPLAY_ORIGIN), i));
+ }
+ }
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST);
+ }
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return rc;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnRequestSeamlessChange}
+ */
+static DECLCALLBACK(int) vmmdevIPort_RequestSeamlessChange(PPDMIVMMDEVPORT pInterface, bool fEnabled)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ /* Verify that the requested seamless mode actually differs from the last one. */
+ bool fSameMode = (pThis->fLastSeamlessEnabled == fEnabled);
+
+ Log(("vmmdevIPort_RequestSeamlessChange: same=%d. new=%d\n", fSameMode, fEnabled));
+
+ if (!fSameMode)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->fSeamlessEnabled = fEnabled;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetMemoryBalloon}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetMemoryBalloon(PPDMIVMMDEVPORT pInterface, uint32_t cMbBalloon)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ /* Verify that the requested balloon size actually differs from the last one. */
+ Log(("vmmdevIPort_SetMemoryBalloon: old=%u new=%u\n", pThis->cMbMemoryBalloonLast, cMbBalloon));
+ if (pThis->cMbMemoryBalloonLast != cMbBalloon)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->cMbMemoryBalloon = cMbBalloon;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_BALLOON_CHANGE_REQUEST);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnVRDPChange}
+ */
+static DECLCALLBACK(int) vmmdevIPort_VRDPChange(PPDMIVMMDEVPORT pInterface, bool fVRDPEnabled, uint32_t uVRDPExperienceLevel)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ bool fSame = (pThis->fVRDPEnabled == fVRDPEnabled);
+
+ Log(("vmmdevIPort_VRDPChange: old=%d. new=%d\n", pThis->fVRDPEnabled, fVRDPEnabled));
+
+ if (!fSame)
+ {
+ pThis->fVRDPEnabled = fVRDPEnabled;
+ pThis->uVRDPExperienceLevel = uVRDPExperienceLevel;
+
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_VRDP);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetStatisticsInterval}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetStatisticsInterval(PPDMIVMMDEVPORT pInterface, uint32_t cSecsStatInterval)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ /* Verify that the requested statistics interval actually differs from the last one. */
+ bool fSame = (pThis->u32LastStatIntervalSize == cSecsStatInterval);
+
+ Log(("vmmdevIPort_SetStatisticsInterval: old=%d. new=%d\n", pThis->u32LastStatIntervalSize, cSecsStatInterval));
+
+ if (!fSame)
+ {
+ /* we could validate the information here but hey, the guest can do that as well! */
+ pThis->u32StatIntervalSize = cSecsStatInterval;
+
+ /* IRQ so the guest knows what's going on */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnSetCredentials}
+ */
+static DECLCALLBACK(int) vmmdevIPort_SetCredentials(PPDMIVMMDEVPORT pInterface, const char *pszUsername,
+ const char *pszPassword, const char *pszDomain, uint32_t fFlags)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ AssertReturn(fFlags & (VMMDEV_SETCREDENTIALS_GUESTLOGON | VMMDEV_SETCREDENTIALS_JUDGE), VERR_INVALID_PARAMETER);
+ size_t const cchUsername = strlen(pszUsername);
+ AssertReturn(cchUsername < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+ size_t const cchPassword = strlen(pszPassword);
+ AssertReturn(cchPassword < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+ size_t const cchDomain = strlen(pszDomain);
+ AssertReturn(cchDomain < VMMDEV_CREDENTIALS_SZ_SIZE, VERR_BUFFER_OVERFLOW);
+
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ /*
+ * Logon mode
+ */
+ if (fFlags & VMMDEV_SETCREDENTIALS_GUESTLOGON)
+ {
+ /* memorize the data */
+ memcpy(pThis->pCredentials->Logon.szUserName, pszUsername, cchUsername);
+ pThis->pCredentials->Logon.szUserName[cchUsername] = '\0';
+ memcpy(pThis->pCredentials->Logon.szPassword, pszPassword, cchPassword);
+ pThis->pCredentials->Logon.szPassword[cchPassword] = '\0';
+ memcpy(pThis->pCredentials->Logon.szDomain, pszDomain, cchDomain);
+ pThis->pCredentials->Logon.szDomain[cchDomain] = '\0';
+ pThis->pCredentials->Logon.fAllowInteractiveLogon = !(fFlags & VMMDEV_SETCREDENTIALS_NOLOCALLOGON);
+ }
+ /*
+ * Credentials verification mode?
+ */
+ else
+ {
+ /* memorize the data */
+ memcpy(pThis->pCredentials->Judge.szUserName, pszUsername, cchUsername);
+ pThis->pCredentials->Judge.szUserName[cchUsername] = '\0';
+ memcpy(pThis->pCredentials->Judge.szPassword, pszPassword, cchPassword);
+ pThis->pCredentials->Judge.szPassword[cchPassword] = '\0';
+ memcpy(pThis->pCredentials->Judge.szDomain, pszDomain, cchDomain);
+ pThis->pCredentials->Judge.szDomain[cchDomain] = '\0';
+
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_JUDGE_CREDENTIALS);
+ }
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnVBVAChange}
+ *
+ * Notification from the Display. Especially useful when acceleration is
+ * disabled after a video mode change.
+ */
+static DECLCALLBACK(void) vmmdevIPort_VBVAChange(PPDMIVMMDEVPORT pInterface, bool fEnabled)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ Log(("vmmdevIPort_VBVAChange: fEnabled = %d\n", fEnabled));
+
+ /* Only used by saved state, which I guess is why we don't bother with locking here. */
+ pThis->u32VideoAccelEnabled = fEnabled;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnCpuHotUnplug}
+ */
+static DECLCALLBACK(int) vmmdevIPort_CpuHotUnplug(PPDMIVMMDEVPORT pInterface, uint32_t idCpuCore, uint32_t idCpuPackage)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ int rc = VINF_SUCCESS;
+
+ Log(("vmmdevIPort_CpuHotUnplug: idCpuCore=%u idCpuPackage=%u\n", idCpuCore, idCpuPackage));
+
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ if (pThis->fCpuHotPlugEventsEnabled)
+ {
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_Unplug;
+ pThis->idCpuCore = idCpuCore;
+ pThis->idCpuPackage = idCpuPackage;
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_CPU_HOTPLUG);
+ }
+ else
+ rc = VERR_VMMDEV_CPU_HOTPLUG_NOT_MONITORED_BY_GUEST;
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return rc;
+}
+
+/**
+ * @interface_method_impl{PDMIVMMDEVPORT,pfnCpuHotPlug}
+ */
+static DECLCALLBACK(int) vmmdevIPort_CpuHotPlug(PPDMIVMMDEVPORT pInterface, uint32_t idCpuCore, uint32_t idCpuPackage)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDEV, IPort);
+ int rc = VINF_SUCCESS;
+
+ Log(("vmmdevCpuPlug: idCpuCore=%u idCpuPackage=%u\n", idCpuCore, idCpuPackage));
+
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ if (pThis->fCpuHotPlugEventsEnabled)
+ {
+ pThis->enmCpuHotPlugEvent = VMMDevCpuEventType_Plug;
+ pThis->idCpuCore = idCpuCore;
+ pThis->idCpuPackage = idCpuPackage;
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_CPU_HOTPLUG);
+ }
+ else
+ rc = VERR_VMMDEV_CPU_HOTPLUG_NOT_MONITORED_BY_GUEST;
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return rc;
+}
+
+
+/* -=-=-=-=-=- Saved State -=-=-=-=-=- */
+
+/**
+ * @callback_method_impl{FNSSMDEVLIVEEXEC}
+ */
+static DECLCALLBACK(int) vmmdevLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
+{
+ RT_NOREF1(uPass);
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+
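+ /* These four config values are exactly the ones vmmdevLoadExec verifies
+ against the current configuration. */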
+ SSMR3PutBool(pSSM, pThis->fGetHostTimeDisabled);
+ SSMR3PutBool(pSSM, pThis->fBackdoorLogDisabled);
+ SSMR3PutBool(pSSM, pThis->fKeepCredentials);
+ SSMR3PutBool(pSSM, pThis->fHeapEnabled);
+
+ return VINF_SSM_DONT_CALL_AGAIN;
+}
+
+
+/**
+ * @callback_method_impl{FNSSMDEVSAVEEXEC}
+ */
+static DECLCALLBACK(int) vmmdevSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
+{
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ vmmdevLiveExec(pDevIns, pSSM, SSM_PASS_FINAL);
+
+ SSMR3PutU32(pSSM, pThis->hypervisorSize);
+ SSMR3PutU32(pSSM, pThis->mouseCapabilities);
+ SSMR3PutS32(pSSM, pThis->mouseXAbs);
+ SSMR3PutS32(pSSM, pThis->mouseYAbs);
+
+ SSMR3PutBool(pSSM, pThis->fNewGuestFilterMask);
+ SSMR3PutU32(pSSM, pThis->u32NewGuestFilterMask);
+ SSMR3PutU32(pSSM, pThis->u32GuestFilterMask);
+ SSMR3PutU32(pSSM, pThis->u32HostEventFlags);
+ /* The following is not strictly necessary as PGM restores MMIO2, keeping it for historical reasons. */
+ SSMR3PutMem(pSSM, &pThis->pVMMDevRAMR3->V, sizeof(pThis->pVMMDevRAMR3->V));
+
+ SSMR3PutMem(pSSM, &pThis->guestInfo, sizeof(pThis->guestInfo));
+ SSMR3PutU32(pSSM, pThis->fu32AdditionsOk);
+ SSMR3PutU32(pSSM, pThis->u32VideoAccelEnabled);
+ SSMR3PutBool(pSSM, pThis->displayChangeData.fGuestSentChangeEventAck);
+
+ SSMR3PutU32(pSSM, pThis->guestCaps);
+
+#ifdef VBOX_WITH_HGCM
+ vmmdevHGCMSaveState(pThis, pSSM);
+#endif /* VBOX_WITH_HGCM */
+
+ SSMR3PutU32(pSSM, pThis->fHostCursorRequested);
+
+ SSMR3PutU32(pSSM, pThis->guestInfo2.uFullVersion);
+ SSMR3PutU32(pSSM, pThis->guestInfo2.uRevision);
+ SSMR3PutU32(pSSM, pThis->guestInfo2.fFeatures);
+ SSMR3PutStrZ(pSSM, pThis->guestInfo2.szName);
+ SSMR3PutU32(pSSM, pThis->cFacilityStatuses);
+ for (uint32_t i = 0; i < pThis->cFacilityStatuses; i++)
+ {
+ SSMR3PutU32(pSSM, pThis->aFacilityStatuses[i].enmFacility);
+ SSMR3PutU32(pSSM, pThis->aFacilityStatuses[i].fFlags);
+ SSMR3PutU16(pSSM, (uint16_t)pThis->aFacilityStatuses[i].enmStatus);
+ SSMR3PutS64(pSSM, RTTimeSpecGetNano(&pThis->aFacilityStatuses[i].TimeSpecTS));
+ }
+
+ /* Heartbeat: */
+ SSMR3PutBool(pSSM, pThis->fHeartbeatActive);
+ SSMR3PutBool(pSSM, pThis->fFlatlined);
+ SSMR3PutU64(pSSM, pThis->nsLastHeartbeatTS);
+ TMR3TimerSave(pThis->pFlatlinedTimer, pSSM);
+
+ PDMCritSectLeave(&pThis->CritSect);
+ return VINF_SUCCESS;
+}
+
+/**
+ * @callback_method_impl{FNSSMDEVLOADEXEC}
+ */
+static DECLCALLBACK(int) vmmdevLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ /** @todo The load code is assuming we're always loaded into a freshly
+ * constructed VM. */
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ int rc;
+
+ if ( uVersion > VMMDEV_SAVED_STATE_VERSION
+ || uVersion < 6)
+ return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
+
+ /* config */
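+ /* Mismatches in the first two options below are only logged as warnings,
+ while mismatches in KeepCredentials and HeapEnabled are treated as hard
+ configuration errors. */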
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_VBOX_30)
+ {
+ bool f;
+ rc = SSMR3GetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fGetHostTimeDisabled != f)
+ LogRel(("VMMDev: Config mismatch - fGetHostTimeDisabled: config=%RTbool saved=%RTbool\n", pThis->fGetHostTimeDisabled, f));
+
+ rc = SSMR3GetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fBackdoorLogDisabled != f)
+ LogRel(("VMMDev: Config mismatch - fBackdoorLogDisabled: config=%RTbool saved=%RTbool\n", pThis->fBackdoorLogDisabled, f));
+
+ rc = SSMR3GetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fKeepCredentials != f)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fKeepCredentials: config=%RTbool saved=%RTbool"),
+ pThis->fKeepCredentials, f);
+ rc = SSMR3GetBool(pSSM, &f); AssertRCReturn(rc, rc);
+ if (pThis->fHeapEnabled != f)
+ return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fHeapEnabled: config=%RTbool saved=%RTbool"),
+ pThis->fHeapEnabled, f);
+ }
+
+ if (uPass != SSM_PASS_FINAL)
+ return VINF_SUCCESS;
+
+ /* state */
+ SSMR3GetU32(pSSM, &pThis->hypervisorSize);
+ SSMR3GetU32(pSSM, &pThis->mouseCapabilities);
+ SSMR3GetS32(pSSM, &pThis->mouseXAbs);
+ SSMR3GetS32(pSSM, &pThis->mouseYAbs);
+
+ SSMR3GetBool(pSSM, &pThis->fNewGuestFilterMask);
+ SSMR3GetU32(pSSM, &pThis->u32NewGuestFilterMask);
+ SSMR3GetU32(pSSM, &pThis->u32GuestFilterMask);
+ SSMR3GetU32(pSSM, &pThis->u32HostEventFlags);
+
+ //SSMR3GetBool(pSSM, &pThis->pVMMDevRAMR3->fHaveEvents);
+ // here be dragons (probably)
+ SSMR3GetMem(pSSM, &pThis->pVMMDevRAMR3->V, sizeof (pThis->pVMMDevRAMR3->V));
+
+ SSMR3GetMem(pSSM, &pThis->guestInfo, sizeof (pThis->guestInfo));
+ SSMR3GetU32(pSSM, &pThis->fu32AdditionsOk);
+ SSMR3GetU32(pSSM, &pThis->u32VideoAccelEnabled);
+ if (uVersion > 10)
+ SSMR3GetBool(pSSM, &pThis->displayChangeData.fGuestSentChangeEventAck);
+
+ rc = SSMR3GetU32(pSSM, &pThis->guestCaps);
+
+ /* Attributes which were temporarily introduced in r30072 */
+ if (uVersion == 7)
+ {
+ uint32_t temp;
+ SSMR3GetU32(pSSM, &temp);
+ rc = SSMR3GetU32(pSSM, &temp);
+ }
+ AssertRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_HGCM
+ rc = vmmdevHGCMLoadState(pThis, pSSM, uVersion);
+ AssertRCReturn(rc, rc);
+#endif /* VBOX_WITH_HGCM */
+
+ if (uVersion >= 10)
+ rc = SSMR3GetU32(pSSM, &pThis->fHostCursorRequested);
+ AssertRCReturn(rc, rc);
+
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_MISSING_GUEST_INFO_2)
+ {
+ SSMR3GetU32(pSSM, &pThis->guestInfo2.uFullVersion);
+ SSMR3GetU32(pSSM, &pThis->guestInfo2.uRevision);
+ SSMR3GetU32(pSSM, &pThis->guestInfo2.fFeatures);
+ rc = SSMR3GetStrZ(pSSM, &pThis->guestInfo2.szName[0], sizeof(pThis->guestInfo2.szName));
+ AssertRCReturn(rc, rc);
+ }
+
+ if (uVersion > VMMDEV_SAVED_STATE_VERSION_MISSING_FACILITY_STATUSES)
+ {
+ uint32_t cFacilityStatuses;
+ rc = SSMR3GetU32(pSSM, &cFacilityStatuses);
+ AssertRCReturn(rc, rc);
+
+ for (uint32_t i = 0; i < cFacilityStatuses; i++)
+ {
+ uint32_t uFacility, fFlags;
+ uint16_t uStatus;
+ int64_t iTimeStampNano;
+
+ SSMR3GetU32(pSSM, &uFacility);
+ SSMR3GetU32(pSSM, &fFlags);
+ SSMR3GetU16(pSSM, &uStatus);
+ rc = SSMR3GetS64(pSSM, &iTimeStampNano);
+ AssertRCReturn(rc, rc);
+
+ PVMMDEVFACILITYSTATUSENTRY pEntry = vmmdevGetFacilityStatusEntry(pThis, (VBoxGuestFacilityType)uFacility);
+ AssertLogRelMsgReturn(pEntry,
+ ("VMMDev: Ran out of entries restoring the guest facility statuses. Saved state has %u.\n", cFacilityStatuses),
+ VERR_OUT_OF_RESOURCES);
+ pEntry->enmStatus = (VBoxGuestFacilityStatus)uStatus;
+ pEntry->fFlags = fFlags;
+ RTTimeSpecSetNano(&pEntry->TimeSpecTS, iTimeStampNano);
+ }
+ }
+
+ /*
+ * Heartbeat.
+ */
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HEARTBEAT)
+ {
+ SSMR3GetBool(pSSM, (bool *)&pThis->fHeartbeatActive);
+ SSMR3GetBool(pSSM, (bool *)&pThis->fFlatlined);
+ SSMR3GetU64(pSSM, (uint64_t *)&pThis->nsLastHeartbeatTS);
+ rc = TMR3TimerLoad(pThis->pFlatlinedTimer, pSSM);
+ AssertRCReturn(rc, rc);
+ if (pThis->fFlatlined)
+ LogRel(("vmmdevLoadState: Guest has flatlined. Last heartbeat %'RU64 ns before state was saved.\n",
+ TMTimerGetNano(pThis->pFlatlinedTimer) - pThis->nsLastHeartbeatTS));
+ }
+
+ /*
+ * On a resume, we send the capabilities changed message so
+ * that listeners can sync their state again
+ */
+ Log(("vmmdevLoadState: capabilities changed (%x), informing connector\n", pThis->mouseCapabilities));
+ if (pThis->pDrv)
+ {
+ pThis->pDrv->pfnUpdateMouseCapabilities(pThis->pDrv, pThis->mouseCapabilities);
+ if (uVersion >= 10)
+ pThis->pDrv->pfnUpdatePointerShape(pThis->pDrv,
+ /*fVisible=*/!!pThis->fHostCursorRequested,
+ /*fAlpha=*/false,
+ /*xHot=*/0, /*yHot=*/0,
+ /*cx=*/0, /*cy=*/0,
+ /*pvShape=*/NULL);
+ }
+
+ if (pThis->fu32AdditionsOk)
+ {
+ vmmdevLogGuestOsInfo(&pThis->guestInfo);
+ if (pThis->pDrv)
+ {
+ if (pThis->guestInfo2.uFullVersion && pThis->pDrv->pfnUpdateGuestInfo2)
+ pThis->pDrv->pfnUpdateGuestInfo2(pThis->pDrv, pThis->guestInfo2.uFullVersion, pThis->guestInfo2.szName,
+ pThis->guestInfo2.uRevision, pThis->guestInfo2.fFeatures);
+ if (pThis->pDrv->pfnUpdateGuestInfo)
+ pThis->pDrv->pfnUpdateGuestInfo(pThis->pDrv, &pThis->guestInfo);
+
+ if (pThis->pDrv->pfnUpdateGuestStatus)
+ {
+ for (uint32_t i = 0; i < pThis->cFacilityStatuses; i++) /* ascending order! */
+ if ( pThis->aFacilityStatuses[i].enmStatus != VBoxGuestFacilityStatus_Inactive
+ || !pThis->aFacilityStatuses[i].fFixed)
+ pThis->pDrv->pfnUpdateGuestStatus(pThis->pDrv,
+ pThis->aFacilityStatuses[i].enmFacility,
+ (uint16_t)pThis->aFacilityStatuses[i].enmStatus,
+ pThis->aFacilityStatuses[i].fFlags,
+ &pThis->aFacilityStatuses[i].TimeSpecTS);
+ }
+ }
+ }
+ if (pThis->pDrv && pThis->pDrv->pfnUpdateGuestCapabilities)
+ pThis->pDrv->pfnUpdateGuestCapabilities(pThis->pDrv, pThis->guestCaps);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Load state done callback. Notify guest of restore event.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance.
+ * @param pSSM The handle to the saved state.
+ */
+static DECLCALLBACK(int) vmmdevLoadStateDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
+{
+ RT_NOREF1(pSSM);
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+
+#ifdef VBOX_WITH_HGCM
+ int rc = vmmdevHGCMLoadStateDone(pThis);
+ AssertLogRelRCReturn(rc, rc);
+#endif /* VBOX_WITH_HGCM */
+
+ /* Reestablish the acceleration status. */
+ if ( pThis->u32VideoAccelEnabled
+ && pThis->pDrv)
+ {
+ pThis->pDrv->pfnVideoAccelEnable(pThis->pDrv, !!pThis->u32VideoAccelEnabled, &pThis->pVMMDevRAMR3->vbvaMemory);
+ }
+
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_RESTORED);
+
+ return VINF_SUCCESS;
+}
+
+
+/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
+
+/**
+ * (Re-)initializes the MMIO2 data.
+ *
+ * @param pThis Pointer to the VMMDev instance data.
+ */
+static void vmmdevInitRam(PVMMDEV pThis)
+{
+ memset(pThis->pVMMDevRAMR3, 0, sizeof(VMMDevMemory));
+ pThis->pVMMDevRAMR3->u32Size = sizeof(VMMDevMemory);
+ pThis->pVMMDevRAMR3->u32Version = VMMDEV_MEMORY_VERSION;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnReset}
+ */
+static DECLCALLBACK(void) vmmdevReset(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
+
+ /*
+ * Reset the mouse integration feature bits
+ */
+ if (pThis->mouseCapabilities & VMMDEV_MOUSE_GUEST_MASK)
+ {
+ pThis->mouseCapabilities &= ~VMMDEV_MOUSE_GUEST_MASK;
+ /* notify the connector */
+ Log(("vmmdevReset: capabilities changed (%x), informing connector\n", pThis->mouseCapabilities));
+ pThis->pDrv->pfnUpdateMouseCapabilities(pThis->pDrv, pThis->mouseCapabilities);
+ }
+ pThis->fHostCursorRequested = false;
+
+ pThis->hypervisorSize = 0;
+
+ /* re-initialize the VMMDev memory */
+ if (pThis->pVMMDevRAMR3)
+ vmmdevInitRam(pThis);
+
+ /* credentials have to go away (by default) */
+ if (!pThis->fKeepCredentials)
+ {
+ memset(pThis->pCredentials->Logon.szUserName, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Logon.szPassword, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Logon.szDomain, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ }
+ memset(pThis->pCredentials->Judge.szUserName, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Judge.szPassword, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+ memset(pThis->pCredentials->Judge.szDomain, '\0', VMMDEV_CREDENTIALS_SZ_SIZE);
+
+ /* Reset means that additions will report again. */
+ const bool fVersionChanged = pThis->fu32AdditionsOk
+ || pThis->guestInfo.interfaceVersion
+ || pThis->guestInfo.osType != VBOXOSTYPE_Unknown;
+ if (fVersionChanged)
+ Log(("vmmdevReset: fu32AdditionsOk=%d additionsVersion=%x osType=%#x\n",
+ pThis->fu32AdditionsOk, pThis->guestInfo.interfaceVersion, pThis->guestInfo.osType));
+ pThis->fu32AdditionsOk = false;
+ memset (&pThis->guestInfo, 0, sizeof (pThis->guestInfo));
+ RT_ZERO(pThis->guestInfo2);
+ const bool fCapsChanged = pThis->guestCaps != 0; /* Report transition to 0. */
+ pThis->guestCaps = 0;
+
+ /* Clear facilities. No need to tell Main as it will get a
+ pfnUpdateGuestInfo callback. */
+ RTTIMESPEC TimeStampNow;
+ RTTimeNow(&TimeStampNow);
+ uint32_t iFacility = pThis->cFacilityStatuses;
+ while (iFacility-- > 0)
+ {
+ pThis->aFacilityStatuses[iFacility].enmStatus = VBoxGuestFacilityStatus_Inactive;
+ pThis->aFacilityStatuses[iFacility].TimeSpecTS = TimeStampNow;
+ }
+
+ /* clear pending display change request. */
+ for (unsigned i = 0; i < RT_ELEMENTS(pThis->displayChangeData.aRequests); i++)
+ {
+ DISPLAYCHANGEREQUEST *pRequest = &pThis->displayChangeData.aRequests[i];
+ memset (&pRequest->lastReadDisplayChangeRequest, 0, sizeof (pRequest->lastReadDisplayChangeRequest));
+ }
+ pThis->displayChangeData.iCurrentMonitor = 0;
+ pThis->displayChangeData.fGuestSentChangeEventAck = false;
+
+ /* disable seamless mode */
+ pThis->fLastSeamlessEnabled = false;
+
+ /* disabled memory ballooning */
+ pThis->cMbMemoryBalloonLast = 0;
+
+ /* disabled statistics updating */
+ pThis->u32LastStatIntervalSize = 0;
+
+#ifdef VBOX_WITH_HGCM
+ /* Clear the "HGCM event enabled" flag so the event can be automatically reenabled. */
+ pThis->u32HGCMEnabled = 0;
+#endif
+
+ /*
+ * Deactivate heartbeat.
+ */
+ if (pThis->fHeartbeatActive)
+ {
+ TMTimerStop(pThis->pFlatlinedTimer);
+ pThis->fFlatlined = false;
+ pThis->fHeartbeatActive = true;
+ }
+
+ /*
+ * Clear the event variables.
+ *
+ * XXX By design we should NOT clear pThis->u32HostEventFlags because it is designed
+ * that way so host events do not depend on guest resets. However, the pending
+ * event flags have actually been cleared here for ages, so we only exempt from
+ * clearing the events which really need to survive the reset. See xtracker 5767.
+ */
+ pThis->u32HostEventFlags &= VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
+ pThis->u32GuestFilterMask = 0;
+ pThis->u32NewGuestFilterMask = 0;
+ pThis->fNewGuestFilterMask = 0;
+
+ /*
+ * Call the update functions as required.
+ */
+ if (fVersionChanged && pThis->pDrv && pThis->pDrv->pfnUpdateGuestInfo)
+ pThis->pDrv->pfnUpdateGuestInfo(pThis->pDrv, &pThis->guestInfo);
+ if (fCapsChanged && pThis->pDrv && pThis->pDrv->pfnUpdateGuestCapabilities)
+ pThis->pDrv->pfnUpdateGuestCapabilities(pThis->pDrv, pThis->guestCaps);
+
+ /*
+ * Generate a unique session id for this VM; it will be changed for each start, reset or restore.
+ * This can be used for restore detection inside the guest.
+ */
+ pThis->idSession = ASMReadTSC();
+
+ PDMCritSectLeave(&pThis->CritSect);
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnRelocate}
+ */
+static DECLCALLBACK(void) vmmdevRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
+{
+ if (offDelta)
+ {
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ LogFlow(("vmmdevRelocate: offDelta=%RGv\n", offDelta));
+
+ if (pThis->pVMMDevRAMRC)
+ pThis->pVMMDevRAMRC += offDelta;
+ pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
+ }
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnDestruct}
+ */
+static DECLCALLBACK(int) vmmdevDestruct(PPDMDEVINS pDevIns)
+{
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+
+ /*
+ * Wipe and free the credentials.
+ */
+ if (pThis->pCredentials)
+ {
+ RTMemWipeThoroughly(pThis->pCredentials, sizeof(*pThis->pCredentials), 10);
+ RTMemFree(pThis->pCredentials);
+ pThis->pCredentials = NULL;
+ }
+
+#ifdef VBOX_WITH_HGCM
+ /*
+ * Everything HGCM.
+ */
+ vmmdevHGCMDestroy(pThis);
+#endif
+
+ /*
+ * Free the request buffers.
+ */
+ for (uint32_t iCpu = 0; iCpu < RT_ELEMENTS(pThis->apReqBufs); iCpu++)
+ {
+ RTMemPageFree(pThis->apReqBufs[iCpu], _4K);
+ pThis->apReqBufs[iCpu] = NULL;
+ }
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /*
+ * Clean up the testing device.
+ */
+ vmmdevTestingTerminate(pDevIns);
+#endif
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @interface_method_impl{PDMDEVREG,pfnConstruct}
+ */
+static DECLCALLBACK(int) vmmdevConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
+{
+ PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+ int rc;
+
+ Assert(iInstance == 0);
+ PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
+
+ /*
+ * Initialize data (most of it anyway).
+ */
+ /* Save PDM device instance data for future reference. */
+ pThis->pDevInsR3 = pDevIns;
+ pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
+ pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
+
+ /* PCI vendor, just a free bogus value */
+ PCIDevSetVendorId(&pThis->PciDev, 0x80ee);
+ /* device ID */
+ PCIDevSetDeviceId(&pThis->PciDev, 0xcafe);
+ /* class sub code (other type of system peripheral) */
+ PCIDevSetClassSub(&pThis->PciDev, 0x80);
+ /* class base code (base system peripheral) */
+ PCIDevSetClassBase(&pThis->PciDev, 0x08);
+ /* header type */
+ PCIDevSetHeaderType(&pThis->PciDev, 0x00);
+ /* interrupt on pin 0 */
+ PCIDevSetInterruptPin(&pThis->PciDev, 0x01);
+
+ RTTIMESPEC TimeStampNow;
+ RTTimeNow(&TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxGuestDriver, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxService, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_VBoxTrayClient, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_Seamless, true /*fFixed*/, &TimeStampNow);
+ vmmdevAllocFacilityStatusEntry(pThis, VBoxGuestFacilityType_Graphics, true /*fFixed*/, &TimeStampNow);
+ Assert(pThis->cFacilityStatuses == 5);
+
+ /*
+ * Interfaces
+ */
+ /* IBase */
+ pThis->IBase.pfnQueryInterface = vmmdevPortQueryInterface;
+
+ /* VMMDev port */
+ pThis->IPort.pfnQueryAbsoluteMouse = vmmdevIPort_QueryAbsoluteMouse;
+ pThis->IPort.pfnSetAbsoluteMouse = vmmdevIPort_SetAbsoluteMouse;
+ pThis->IPort.pfnQueryMouseCapabilities = vmmdevIPort_QueryMouseCapabilities;
+ pThis->IPort.pfnUpdateMouseCapabilities = vmmdevIPort_UpdateMouseCapabilities;
+ pThis->IPort.pfnRequestDisplayChange = vmmdevIPort_RequestDisplayChange;
+ pThis->IPort.pfnSetCredentials = vmmdevIPort_SetCredentials;
+ pThis->IPort.pfnVBVAChange = vmmdevIPort_VBVAChange;
+ pThis->IPort.pfnRequestSeamlessChange = vmmdevIPort_RequestSeamlessChange;
+ pThis->IPort.pfnSetMemoryBalloon = vmmdevIPort_SetMemoryBalloon;
+ pThis->IPort.pfnSetStatisticsInterval = vmmdevIPort_SetStatisticsInterval;
+ pThis->IPort.pfnVRDPChange = vmmdevIPort_VRDPChange;
+ pThis->IPort.pfnCpuHotUnplug = vmmdevIPort_CpuHotUnplug;
+ pThis->IPort.pfnCpuHotPlug = vmmdevIPort_CpuHotPlug;
+
+ /* Shared folder LED */
+ pThis->SharedFolders.Led.u32Magic = PDMLED_MAGIC;
+ pThis->SharedFolders.ILeds.pfnQueryStatusLed = vmmdevQueryStatusLed;
+
+#ifdef VBOX_WITH_HGCM
+ /* HGCM port */
+ pThis->IHGCMPort.pfnCompleted = hgcmCompleted;
+ pThis->IHGCMPort.pfnIsCmdRestored = hgcmIsCmdRestored;
+ pThis->IHGCMPort.pfnIsCmdCancelled = hgcmIsCmdCancelled;
+ pThis->IHGCMPort.pfnGetRequestor = hgcmGetRequestor;
+ pThis->IHGCMPort.pfnGetVMMDevSessionId = hgcmGetVMMDevSessionId;
+#endif
+
+ pThis->pCredentials = (VMMDEVCREDS *)RTMemAllocZ(sizeof(*pThis->pCredentials));
+ if (!pThis->pCredentials)
+ return VERR_NO_MEMORY;
+
+
+ /*
+ * Validate and read the configuration.
+ */
+ PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
+ "GetHostTimeDisabled|"
+ "BackdoorLogDisabled|"
+ "KeepCredentials|"
+ "HeapEnabled|"
+ "RZEnabled|"
+ "GuestCoreDumpEnabled|"
+ "GuestCoreDumpDir|"
+ "GuestCoreDumpCount|"
+ "HeartbeatInterval|"
+ "HeartbeatTimeout|"
+ "TestingEnabled|"
+ "TestingMMIO|"
+ "TestintXmlOutputFile"
+ ,
+ "");
+
+ rc = CFGMR3QueryBoolDef(pCfg, "GetHostTimeDisabled", &pThis->fGetHostTimeDisabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GetHostTimeDisabled\" as a boolean"));
+
+ rc = CFGMR3QueryBoolDef(pCfg, "BackdoorLogDisabled", &pThis->fBackdoorLogDisabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"BackdoorLogDisabled\" as a boolean"));
+
+ rc = CFGMR3QueryBoolDef(pCfg, "KeepCredentials", &pThis->fKeepCredentials, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"KeepCredentials\" as a boolean"));
+
+ rc = CFGMR3QueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, true);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeapEnabled\" as a boolean"));
+
+ rc = CFGMR3QueryBoolDef(pCfg, "RZEnabled", &pThis->fRZEnabled, true);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"RZEnabled\" as a boolean"));
+
+ rc = CFGMR3QueryBoolDef(pCfg, "GuestCoreDumpEnabled", &pThis->fGuestCoreDumpEnabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GuestCoreDumpEnabled\" as a boolean"));
+
+ char *pszGuestCoreDumpDir = NULL;
+ rc = CFGMR3QueryStringAllocDef(pCfg, "GuestCoreDumpDir", &pszGuestCoreDumpDir, "");
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GuestCoreDumpDir\" as a string"));
+
+ RTStrCopy(pThis->szGuestCoreDumpDir, sizeof(pThis->szGuestCoreDumpDir), pszGuestCoreDumpDir);
+ MMR3HeapFree(pszGuestCoreDumpDir);
+
+ rc = CFGMR3QueryU32Def(pCfg, "GuestCoreDumpCount", &pThis->cGuestCoreDumps, 3);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"GuestCoreDumpCount\" as a 32-bit unsigned integer"));
+
+ rc = CFGMR3QueryU64Def(pCfg, "HeartbeatInterval", &pThis->cNsHeartbeatInterval, VMMDEV_HEARTBEAT_DEFAULT_INTERVAL);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeartbeatInterval\" as a 64-bit unsigned integer"));
+ if (pThis->cNsHeartbeatInterval < RT_NS_100MS / 2)
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Heartbeat interval \"HeartbeatInterval\" too small"));
+
+ rc = CFGMR3QueryU64Def(pCfg, "HeartbeatTimeout", &pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval * 2);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"HeartbeatTimeout\" as a 64-bit unsigned integer"));
+ if (pThis->cNsHeartbeatTimeout < RT_NS_100MS)
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" too small"));
+ if (pThis->cNsHeartbeatTimeout <= pThis->cNsHeartbeatInterval + RT_NS_10MS)
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" value (%'ull ns) is too close to the interval (%'ull ns)"),
+ pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval);
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ rc = CFGMR3QueryBoolDef(pCfg, "TestingEnabled", &pThis->fTestingEnabled, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"TestingEnabled\" as a boolean"));
+ rc = CFGMR3QueryBoolDef(pCfg, "TestingMMIO", &pThis->fTestingMMIO, false);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc,
+ N_("Configuration error: Failed querying \"TestingMMIO\" as a boolean"));
+ rc = CFGMR3QueryStringAllocDef(pCfg, "TestintXmlOutputFile", &pThis->pszTestingXmlOutput, NULL);
+ if (RT_FAILURE(rc))
+ return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"TestintXmlOutputFile\" as a string"));
+
+ /** @todo image-to-load-filename? */
+#endif
+
+ pThis->cbGuestRAM = MMR3PhysGetRamSize(PDMDevHlpGetVM(pDevIns));
+
+ /*
+ * We do our own locking entirely. So, install NOP critsect for the device
+ * and create our own critsect for use where it really matters (++).
+ */
+ rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+ AssertRCReturn(rc, rc);
+ rc = PDMDevHlpCritSectInit(pDevIns, &pThis->CritSect, RT_SRC_POS, "VMMDev#%u", iInstance);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Register the backdoor logging port
+ */
+ rc = PDMDevHlpIOPortRegister(pDevIns, RTLOG_DEBUG_PORT, 1, NULL, vmmdevBackdoorLog,
+ NULL, NULL, NULL, "VMMDev backdoor logging");
+ AssertRCReturn(rc, rc);
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+ /*
+ * Alternative timesync source.
+ *
+ * This was originally added for creating a simple time sync service in an
+ * OpenBSD guest without requiring VBoxGuest and VBoxService to be ported
+ * first. We keep it in case it comes in handy.
+ */
+ rc = PDMDevHlpIOPortRegister(pDevIns, 0x505, 1, NULL,
+ vmmdevAltTimeSyncWrite, vmmdevAltTimeSyncRead,
+ NULL, NULL, "VMMDev timesync backdoor");
+ AssertRCReturn(rc, rc);
+#endif
+
+ /*
+ * Register the PCI device.
+ */
+ rc = PDMDevHlpPCIRegister(pDevIns, &pThis->PciDev);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (pThis->PciDev.uDevFn != 32 || iInstance != 0)
+ Log(("!!WARNING!!: pThis->PciDev.uDevFn=%d (ignore if testcase or no started by Main)\n", pThis->PciDev.uDevFn));
+ rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, 0x20, PCI_ADDRESS_SPACE_IO, vmmdevIOPortRegionMap);
+ if (RT_FAILURE(rc))
+ return rc;
+ rc = PDMDevHlpPCIIORegionRegister(pDevIns, 1, VMMDEV_RAM_SIZE, PCI_ADDRESS_SPACE_MEM, vmmdevIORAMRegionMap);
+ if (RT_FAILURE(rc))
+ return rc;
+ if (pThis->fHeapEnabled)
+ {
+ rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, VMMDEV_HEAP_SIZE, PCI_ADDRESS_SPACE_MEM_PREFETCH, vmmdevIORAMRegionMap);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ /*
+ * Allocate and initialize the MMIO2 memory.
+ *
+ * We map the first page into raw-mode and kernel contexts so we can handle
+ * interrupt acknowledge requests more timely.
+ */
+ rc = PDMDevHlpMMIO2Register(pDevIns, &pThis->PciDev, 1 /*iRegion*/, VMMDEV_RAM_SIZE, 0 /*fFlags*/,
+ (void **)&pThis->pVMMDevRAMR3, "VMMDev");
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Failed to allocate %u bytes of memory for the VMM device"), VMMDEV_RAM_SIZE);
+ vmmdevInitRam(pThis);
+ if (pThis->fRZEnabled)
+ {
+ rc = PDMDevHlpMMIO2MapKernel(pDevIns, &pThis->PciDev, 1 /*iRegion*/, 0 /*off*/, PAGE_SIZE, "VMMDev", &pThis->pVMMDevRAMR0);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Failed to map first page of the VMMDev ram into kernel space: %Rrc"), rc);
+
+#ifdef VBOX_WITH_RAW_MODE
+ rc = PDMDevHlpMMHyperMapMMIO2(pDevIns, &pThis->PciDev, 1 /*iRegion*/, 0 /*off*/, PAGE_SIZE, "VMMDev", &pThis->pVMMDevRAMRC);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Failed to map first page of the VMMDev ram into raw-mode context: %Rrc"), rc);
+#endif
+ }
+
+ /*
+ * Allocate and initialize the MMIO2 heap.
+ */
+ if (pThis->fHeapEnabled)
+ {
+ rc = PDMDevHlpMMIO2Register(pDevIns, &pThis->PciDev, 2 /*iRegion*/, VMMDEV_HEAP_SIZE, 0 /*fFlags*/,
+ (void **)&pThis->pVMMDevHeapR3, "VMMDev Heap");
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+ N_("Failed to allocate %u bytes of memory for the VMM device heap"), PAGE_SIZE);
+
+ /* Register the memory area with PDM so HM can access it before it's mapped. */
+ rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThis->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
+ AssertLogRelRCReturn(rc, rc);
+ }
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /*
+ * Initialize testing.
+ */
+ rc = vmmdevTestingInitialize(pDevIns);
+ if (RT_FAILURE(rc))
+ return rc;
+#endif
+
+ /*
+ * Get the corresponding connector interface
+ */
+ rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "VMM Driver Port");
+ if (RT_SUCCESS(rc))
+ {
+ pThis->pDrv = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIVMMDEVCONNECTOR);
+ AssertMsgReturn(pThis->pDrv, ("LUN #0 doesn't have a VMMDev connector interface!\n"), VERR_PDM_MISSING_INTERFACE);
+#ifdef VBOX_WITH_HGCM
+ pThis->pHGCMDrv = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIHGCMCONNECTOR);
+ if (!pThis->pHGCMDrv)
+ {
+ Log(("LUN #0 doesn't have a HGCM connector interface, HGCM is not supported. rc=%Rrc\n", rc));
+ /* this is not actually an error, just means that there is no support for HGCM */
+ }
+#endif
+ /* Query the initial balloon size. */
+ AssertPtr(pThis->pDrv->pfnQueryBalloonSize);
+ rc = pThis->pDrv->pfnQueryBalloonSize(pThis->pDrv, &pThis->cMbMemoryBalloon);
+ AssertRC(rc);
+
+ Log(("Initial balloon size %x\n", pThis->cMbMemoryBalloon));
+ }
+ else if (rc == VERR_PDM_NO_ATTACHED_DRIVER)
+ {
+ Log(("%s/%d: warning: no driver attached to LUN #0!\n", pDevIns->pReg->szName, pDevIns->iInstance));
+ rc = VINF_SUCCESS;
+ }
+ else
+ AssertMsgFailedReturn(("Failed to attach LUN #0! rc=%Rrc\n", rc), rc);
+
+ /*
+ * Attach status driver for shared folders (optional).
+ */
+ PPDMIBASE pBase;
+ rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
+ if (RT_SUCCESS(rc))
+ pThis->SharedFolders.pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
+ else if (rc != VERR_PDM_NO_ATTACHED_DRIVER)
+ {
+ AssertMsgFailed(("Failed to attach to status driver. rc=%Rrc\n", rc));
+ return rc;
+ }
+
+ /*
+ * Register saved state and init the HGCM CmdList critsect.
+ */
+ rc = PDMDevHlpSSMRegisterEx(pDevIns, VMMDEV_SAVED_STATE_VERSION, sizeof(*pThis), NULL,
+ NULL, vmmdevLiveExec, NULL,
+ NULL, vmmdevSaveExec, NULL,
+ NULL, vmmdevLoadExec, vmmdevLoadStateDone);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Create heartbeat checking timer.
+ */
+ rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, vmmDevHeartbeatFlatlinedTimer, pThis,
+ TMTIMER_FLAGS_NO_CRIT_SECT, "Heartbeat flatlined", &pThis->pFlatlinedTimer);
+ AssertRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_HGCM
+ rc = vmmdevHGCMInit(pThis);
+ AssertRCReturn(rc, rc);
+#endif
+
+ /*
+ * In this version of VirtualBox the GUI checks whether "needs host cursor"
+ * changes.
+ */
+ pThis->mouseCapabilities |= VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR;
+
+ /*
+ * Statistics.
+ */
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMemBalloonChunks, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Memory balloon size", "/Devices/VMMDev/BalloonChunks");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Fast IRQ acknowledgments handled in ring-3.", "/Devices/VMMDev/FastIrqAckR3");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckRZ, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Fast IRQ acknowledgments handled in ring-0 or raw-mode.", "/Devices/VMMDev/FastIrqAckRZ");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatSlowIrqAck, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Slow IRQ acknowledgments (old style).", "/Devices/VMMDev/SlowIrqAck");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReqBufAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times a larger request buffer was required.", "/Devices/VMMDev/LargeReqBufAllocs");
+#ifdef VBOX_WITH_HGCM
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatHgcmCmdArrival, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling HGCM call arrival processing", "/HGCM/MsgArrival");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatHgcmCmdCompletion, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling HGCM call completion processing", "/HGCM/MsgCompletion");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatHgcmCmdTotal, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+ "Profiling whole HGCM call.", "/HGCM/MsgTotal");
+ PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatHgcmLargeCmdAllocs,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+ "Times the allocation cache could not be used.", "/HGCM/LargeCmdAllocs");
+#endif
+
+ /*
+ * Generate a unique session id for this VM; it will be changed for each
+ * start, reset or restore. This can be used for restore detection inside
+ * the guest.
+ */
+ pThis->idSession = ASMReadTSC();
+ return rc;
+}
+
+/**
+ * The device registration structure.
+ */
+extern "C" const PDMDEVREG g_DeviceVMMDev =
+{
+ /* u32Version */
+ PDM_DEVREG_VERSION,
+ /* szName */
+ "VMMDev",
+ /* szRCMod */
+ "VBoxDDRC.rc",
+ /* szR0Mod */
+ "VBoxDDR0.r0",
+ /* pszDescription */
+ "VirtualBox VMM Device\n",
+ /* fFlags */
+ PDM_DEVREG_FLAGS_HOST_BITS_DEFAULT | PDM_DEVREG_FLAGS_GUEST_BITS_DEFAULT | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
+ /* fClass */
+ PDM_DEVREG_CLASS_VMM_DEV,
+ /* cMaxInstances */
+ 1,
+ /* cbInstance */
+ sizeof(VMMDevState),
+ /* pfnConstruct */
+ vmmdevConstruct,
+ /* pfnDestruct */
+ vmmdevDestruct,
+ /* pfnRelocate */
+ vmmdevRelocate,
+ /* pfnMemSetup */
+ NULL,
+ /* pfnPowerOn */
+ NULL,
+ /* pfnReset */
+ vmmdevReset,
+ /* pfnSuspend */
+ NULL,
+ /* pfnResume */
+ NULL,
+ /* pfnAttach */
+ NULL,
+ /* pfnDetach */
+ NULL,
+ /* pfnQueryInterface. */
+ NULL,
+ /* pfnInitComplete */
+ NULL,
+ /* pfnPowerOff */
+ NULL,
+ /* pfnSoftReset */
+ NULL,
+ /* u32VersionEnd */
+ PDM_DEVREG_VERSION
+};
+#endif /* IN_RING3 */
+#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
diff --git a/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp b/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
new file mode 100644
index 00000000..2cd8b412
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
@@ -0,0 +1,2426 @@
+/* $Id: VMMDevHGCM.cpp $ */
+/** @file
+ * VMMDev - HGCM - Host-Guest Communication Manager Device.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+
+#include <VBox/AssertGuest.h>
+#include <VBox/err.h>
+#include <VBox/hgcmsvc.h>
+#include <VBox/log.h>
+
+#include "VMMDevHGCM.h"
+
+#ifdef DEBUG
+# define VBOX_STRICT_GUEST
+#endif
+
+#ifdef VBOX_WITH_DTRACE
+# include "dtrace/VBoxDD.h"
+#else
+# define VBOXDD_HGCMCALL_ENTER(a,b,c,d) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_REQ(a,b) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_EMT(a,b) do { } while (0)
+# define VBOXDD_HGCMCALL_COMPLETED_DONE(a,b,c,d) do { } while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
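+/**
+ * The type of an HGCM command, i.e. which kind of guest request it originates from.
+ */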
+typedef enum VBOXHGCMCMDTYPE
+{
+ VBOXHGCMCMDTYPE_LOADSTATE = 0,
+ VBOXHGCMCMDTYPE_CONNECT,
+ VBOXHGCMCMDTYPE_DISCONNECT,
+ VBOXHGCMCMDTYPE_CALL,
+ VBOXHGCMCMDTYPE_SizeHack = 0x7fffffff
+} VBOXHGCMCMDTYPE;
+
+/**
+ * Information about a 32 or 64 bit parameter.
+ */
+typedef struct VBOXHGCMPARMVAL
+{
+ /** Actual value. Both 32 and 64 bit is saved here. */
+ uint64_t u64Value;
+
+ /** Offset from the start of the request where the value is stored. */
+ uint32_t offValue;
+
+ /** Size of the value: 4 for 32 bit and 8 for 64 bit. */
+ uint32_t cbValue;
+
+} VBOXHGCMPARMVAL;
+
+/**
+ * Information about a pointer parameter.
+ */
+typedef struct VBOXHGCMPARMPTR
+{
+ /** Size of the buffer described by the pointer parameter. */
+ uint32_t cbData;
+
+ /** Offset in the first physical page of the region. */
+ uint32_t offFirstPage;
+
+ /** How many pages. */
+ uint32_t cPages;
+
+ /** How the buffer should be copied VBOX_HGCM_F_PARM_*. */
+ uint32_t fu32Direction;
+
+ /** Pointer to array of the GC physical addresses for these pages.
+ * It is assumed that the physical address of the locked resident guest page
+ * does not change. */
+ RTGCPHYS *paPages;
+
+ /** For single page requests. */
+ RTGCPHYS GCPhysSinglePage;
+
+} VBOXHGCMPARMPTR;
+
+/**
+ * Information about a guest HGCM parameter.
+ */
+typedef struct VBOXHGCMGUESTPARM
+{
+ /** The parameter type. */
+ HGCMFunctionParameterType enmType;
+
+ union
+ {
+ VBOXHGCMPARMVAL val;
+ VBOXHGCMPARMPTR ptr;
+ } u;
+
+} VBOXHGCMGUESTPARM;
+
+typedef struct VBOXHGCMCMD
+{
+ /** Active commands, list is protected by critsectHGCMCmdList. */
+ RTLISTNODE node;
+
+ /** The type of the command (VBOXHGCMCMDTYPE). */
+ uint8_t enmCmdType;
+
+ /** Whether the command was cancelled by the guest. */
+ bool fCancelled;
+
+ /** Whether the command was restored from saved state. */
+ bool fRestored;
+
+ /** Set if allocated from the memory cache, clear if heap. */
+ bool fMemCache;
+
+ /** Copy of VMMDevRequestHeader::fRequestor.
+ * @note Only valid if VBOXGSTINFO2_F_REQUESTOR_INFO is set in
+ * VMMDevState.guestInfo2.fFeatures. */
+ uint32_t fRequestor;
+
+ /** GC physical address of the guest request. */
+ RTGCPHYS GCPhys;
+
+ /** Request packet size. */
+ uint32_t cbRequest;
+
+ /** The type of the guest request. */
+ VMMDevRequestType enmRequestType;
+
+ /** Pointer to the locked request, NULL if not locked. */
+ void *pvReqLocked;
+ /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
+ PGMPAGEMAPLOCK ReqMapLock;
+
+ /** The STAM_GET_TS() value when the request arrived. */
+ uint64_t tsArrival;
+ /** The STAM_GET_TS() value when the hgcmCompleted() is called. */
+ uint64_t tsComplete;
+
+ union
+ {
+ struct
+ {
+ uint32_t u32ClientID;
+ HGCMServiceLocation *pLoc; /**< Allocated after this structure. */
+ } connect;
+
+ struct
+ {
+ uint32_t u32ClientID;
+ } disconnect;
+
+ struct
+ {
+ /* Number of elements in paGuestParms and paHostParms arrays. */
+ uint32_t cParms;
+
+ uint32_t u32ClientID;
+
+ uint32_t u32Function;
+
+ /** Pointer to information about guest parameters in case of a Call request.
+ * Follows this structure in the same memory block.
+ */
+ VBOXHGCMGUESTPARM *paGuestParms;
+
+ /** Pointer to converted host parameters in case of a Call request.
+ * Follows this structure in the same memory block.
+ */
+ VBOXHGCMSVCPARM *paHostParms;
+
+ /* VBOXHGCMGUESTPARM[] */
+ /* VBOXHGCMSVCPARM[] */
+ } call;
+ } u;
+} VBOXHGCMCMD;
+
+
+/**
+ * Version for the memory cache.
+ */
+typedef struct VBOXHGCMCMDCACHED
+{
+ VBOXHGCMCMD Core; /**< 112 */
+ VBOXHGCMGUESTPARM aGuestParms[6]; /**< 40 * 6 = 240 */
+ VBOXHGCMSVCPARM aHostParms[6]; /**< 24 * 6 = 144 */
+} VBOXHGCMCMDCACHED; /**< 112+240+144 = 496 */
+AssertCompile(sizeof(VBOXHGCMCMD) <= 112);
+AssertCompile(sizeof(VBOXHGCMGUESTPARM) <= 40);
+AssertCompile(sizeof(VBOXHGCMSVCPARM) <= 24);
+AssertCompile(sizeof(VBOXHGCMCMDCACHED) <= 512);
+AssertCompile(sizeof(VBOXHGCMCMDCACHED) > sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
+
+
+static int vmmdevHGCMCmdListLock(PVMMDEV pThis)
+{
+ int rc = RTCritSectEnter(&pThis->critsectHGCMCmdList);
+ AssertRC(rc);
+ return rc;
+}
+
+static void vmmdevHGCMCmdListUnlock(PVMMDEV pThis)
+{
+ int rc = RTCritSectLeave(&pThis->critsectHGCMCmdList);
+ AssertRC(rc);
+}
+
+/** Allocate and initialize VBOXHGCMCMD structure for HGCM request.
+ *
+ * @returns Pointer to the command on success, NULL otherwise.
+ * @param pThis The VMMDev instance data.
+ * @param enmCmdType Type of the command.
+ * @param GCPhys The guest physical address of the HGCM request.
+ * @param cbRequest The size of the HGCM request.
+ * @param cParms Number of HGCM parameters for VBOXHGCMCMDTYPE_CALL command.
+ * @param fRequestor The VMMDevRequestHeader::fRequestor value.
+ */
+static PVBOXHGCMCMD vmmdevHGCMCmdAlloc(PVMMDEV pThis, VBOXHGCMCMDTYPE enmCmdType, RTGCPHYS GCPhys,
+ uint32_t cbRequest, uint32_t cParms, uint32_t fRequestor)
+{
+#if 1
+ /*
+ * Try to use the cache.
+ */
+ VBOXHGCMCMDCACHED *pCmdCached;
+ AssertCompile(sizeof(*pCmdCached) >= sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
+ if (cParms <= RT_ELEMENTS(pCmdCached->aGuestParms))
+ {
+ int rc = RTMemCacheAllocEx(pThis->hHgcmCmdCache, (void **)&pCmdCached);
+ if (RT_SUCCESS(rc))
+ {
+ RT_ZERO(*pCmdCached);
+ pCmdCached->Core.fMemCache = true;
+ pCmdCached->Core.GCPhys = GCPhys;
+ pCmdCached->Core.cbRequest = cbRequest;
+ pCmdCached->Core.enmCmdType = enmCmdType;
+ pCmdCached->Core.fRequestor = fRequestor;
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pCmdCached->Core.u.call.cParms = cParms;
+ pCmdCached->Core.u.call.paGuestParms = pCmdCached->aGuestParms;
+ pCmdCached->Core.u.call.paHostParms = pCmdCached->aHostParms;
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ pCmdCached->Core.u.connect.pLoc = (HGCMServiceLocation *)(&pCmdCached->Core + 1);
+
+ return &pCmdCached->Core;
+ }
+ return NULL;
+ }
+ STAM_REL_COUNTER_INC(&pThis->StatHgcmLargeCmdAllocs);
+
+#else
+ RT_NOREF(pThis);
+#endif
+
+ /* Size of required memory buffer. */
+ const uint32_t cbCmd = sizeof(VBOXHGCMCMD) + cParms * (sizeof(VBOXHGCMGUESTPARM) + sizeof(VBOXHGCMSVCPARM))
+ + (enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? sizeof(HGCMServiceLocation) : 0);
+
+ PVBOXHGCMCMD pCmd = (PVBOXHGCMCMD)RTMemAllocZ(cbCmd);
+ if (pCmd)
+ {
+ pCmd->enmCmdType = enmCmdType;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+ pCmd->fRequestor = fRequestor;
+
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ pCmd->u.call.cParms = cParms;
+ if (cParms)
+ {
+ pCmd->u.call.paGuestParms = (VBOXHGCMGUESTPARM *)((uint8_t *)pCmd
+ + sizeof(struct VBOXHGCMCMD));
+ pCmd->u.call.paHostParms = (VBOXHGCMSVCPARM *)((uint8_t *)pCmd->u.call.paGuestParms
+ + cParms * sizeof(VBOXHGCMGUESTPARM));
+ }
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ pCmd->u.connect.pLoc = (HGCMServiceLocation *)(pCmd + 1);
+ }
+ return pCmd;
+}
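+
+/* Illustrative sketch (comment only, not compiled): the single-block layout set up
+ * by vmmdevHGCMCmdAlloc() above for a heap allocated CALL command, i.e. one with
+ * more parameters than a cache entry holds. pThis, GCPhys and cbReq are
+ * hypothetical placeholders; the NULL check is omitted for brevity.
+ *
+ * @code
+ *      PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CALL, GCPhys, cbReq, 8, 0);
+ *      // Guest and host parameter arrays live in the same allocation, right after the command:
+ *      Assert((uint8_t *)pCmd->u.call.paGuestParms == (uint8_t *)pCmd + sizeof(VBOXHGCMCMD));
+ *      Assert(   (uint8_t *)pCmd->u.call.paHostParms
+ *             == (uint8_t *)pCmd->u.call.paGuestParms + 8 * sizeof(VBOXHGCMGUESTPARM));
+ *      vmmdevHGCMCmdFree(pThis, pCmd);
+ * @endcode
+ */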
+
+/** Deallocate VBOXHGCMCMD memory.
+ *
+ * @param pThis The VMMDev instance data.
+ * @param pCmd Command to deallocate.
+ */
+static void vmmdevHGCMCmdFree(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
+{
+ if (pCmd)
+ {
+ if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ uint32_t i;
+ for (i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ if (pHostParm->type == VBOX_HGCM_SVC_PARM_PTR)
+ RTMemFree(pHostParm->u.pointer.addr);
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ if (pGuestParm->u.ptr.paPages != &pGuestParm->u.ptr.GCPhysSinglePage)
+ RTMemFree(pGuestParm->u.ptr.paPages);
+ }
+ }
+
+ if (pCmd->pvReqLocked)
+ {
+ PDMDevHlpPhysReleasePageMappingLock(pThis->pDevInsR3, &pCmd->ReqMapLock);
+ pCmd->pvReqLocked = NULL;
+ }
+
+#if 1
+ if (pCmd->fMemCache)
+ RTMemCacheFree(pThis->hHgcmCmdCache, pCmd);
+ else
+#endif
+ RTMemFree(pCmd);
+ }
+}
+
+/** Add VBOXHGCMCMD to the list of pending commands.
+ *
+ * @returns VBox status code.
+ * @param pThis The VMMDev instance data.
+ * @param pCmd Command to add.
+ */
+static int vmmdevHGCMAddCommand(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
+{
+ int rc = vmmdevHGCMCmdListLock(pThis);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("%p type %d\n", pCmd, pCmd->enmCmdType));
+
+ RTListPrepend(&pThis->listHGCMCmd, &pCmd->node);
+
+ /* Automatically enable HGCM events, if there are HGCM commands. */
+ if ( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
+ || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ LogFunc(("u32HGCMEnabled = %d\n", pThis->u32HGCMEnabled));
+ if (ASMAtomicCmpXchgU32(&pThis->u32HGCMEnabled, 1, 0))
+ VMMDevCtlSetGuestFilterMask(pThis, VMMDEV_EVENT_HGCM, 0);
+ }
+
+ vmmdevHGCMCmdListUnlock(pThis);
+ return rc;
+}
+
+/** Remove VBOXHGCMCMD from the list of pending commands.
+ *
+ * @returns VBox status code.
+ * @param pThis The VMMDev instance data.
+ * @param pCmd Command to remove.
+ */
+static int vmmdevHGCMRemoveCommand(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
+{
+ int rc = vmmdevHGCMCmdListLock(pThis);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("%p\n", pCmd));
+
+ RTListNodeRemove(&pCmd->node);
+
+ vmmdevHGCMCmdListUnlock(pThis);
+ return rc;
+}
+
+/**
+ * Find a HGCM command by its physical address.
+ *
+ * The caller is responsible for taking the command list lock before calling
+ * this function.
+ *
+ * @returns Pointer to the command on success, NULL otherwise.
+ * @param pThis The VMMDev instance data.
+ * @param GCPhys The physical address of the command we're looking for.
+ */
+DECLINLINE(PVBOXHGCMCMD) vmmdevHGCMFindCommandLocked(PVMMDEV pThis, RTGCPHYS GCPhys)
+{
+ PVBOXHGCMCMD pCmd;
+ RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ if (pCmd->GCPhys == GCPhys)
+ return pCmd;
+ }
+ return NULL;
+}
+
+/** Copy VMMDevHGCMConnect request data from the guest to VBOXHGCMCMD command.
+ *
+ * @param pHGCMConnect The source guest request (cached in host memory).
+ * @param pCmd Destination command.
+ */
+static void vmmdevHGCMConnectFetch(const VMMDevHGCMConnect *pHGCMConnect, PVBOXHGCMCMD pCmd)
+{
+ pCmd->enmRequestType = pHGCMConnect->header.header.requestType;
+ pCmd->u.connect.u32ClientID = pHGCMConnect->u32ClientID;
+ *pCmd->u.connect.pLoc = pHGCMConnect->loc;
+}
+
+/** Handle VMMDevHGCMConnect request.
+ *
+ * @param pThis The VMMDev instance data.
+ * @param pHGCMConnect The guest request (cached in host memory).
+ * @param GCPhys The physical address of the request.
+ */
+int vmmdevHGCMConnect(PVMMDEV pThis, const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys)
+{
+ int rc = VINF_SUCCESS;
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CONNECT, GCPhys, pHGCMConnect->header.header.size, 0,
+ pHGCMConnect->header.header.fRequestor);
+ if (pCmd)
+ {
+ vmmdevHGCMConnectFetch(pHGCMConnect, pCmd);
+
+ /* Only allow the guest to use existing services! */
+ ASSERT_GUEST(pHGCMConnect->loc.type == VMMDevHGCMLoc_LocalHost_Existing);
+ pCmd->u.connect.pLoc->type = VMMDevHGCMLoc_LocalHost_Existing;
+
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ rc = pThis->pHGCMDrv->pfnConnect(pThis->pHGCMDrv, pCmd, pCmd->u.connect.pLoc, &pCmd->u.connect.u32ClientID);
+ if (RT_FAILURE(rc))
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ }
+ else
+ {
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+/** Copy VMMDevHGCMDisconnect request data from the guest to VBOXHGCMCMD command.
+ *
+ * @param pHGCMDisconnect The source guest request (cached in host memory).
+ * @param pCmd Destination command.
+ */
+static void vmmdevHGCMDisconnectFetch(const VMMDevHGCMDisconnect *pHGCMDisconnect, PVBOXHGCMCMD pCmd)
+{
+ pCmd->enmRequestType = pHGCMDisconnect->header.header.requestType;
+ pCmd->u.disconnect.u32ClientID = pHGCMDisconnect->u32ClientID;
+}
+
+/** Handle VMMDevHGCMDisconnect request.
+ *
+ * @param pThis The VMMDev instance data.
+ * @param pHGCMDisconnect The guest request (cached in host memory).
+ * @param GCPhys The physical address of the request.
+ */
+int vmmdevHGCMDisconnect(PVMMDEV pThis, const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys)
+{
+ int rc = VINF_SUCCESS;
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_DISCONNECT, GCPhys, pHGCMDisconnect->header.header.size, 0,
+ pHGCMDisconnect->header.header.fRequestor);
+ if (pCmd)
+ {
+ vmmdevHGCMDisconnectFetch(pHGCMDisconnect, pCmd);
+
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ rc = pThis->pHGCMDrv->pfnDisconnect (pThis->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
+ if (RT_FAILURE(rc))
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ return rc;
+}
+
+/** Translate LinAddr parameter type to the direction of data transfer.
+ *
+ * @returns VBOX_HGCM_F_PARM_DIRECTION_* flags.
+ * @param enmType Type of the LinAddr parameter.
+ */
+static uint32_t vmmdevHGCMParmTypeToDirection(HGCMFunctionParameterType enmType)
+{
+ if (enmType == VMMDevHGCMParmType_LinAddr_In) return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ if (enmType == VMMDevHGCMParmType_LinAddr_Out) return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+}
+
+/** Check if list of pages in a HGCM pointer parameter corresponds to a contiguous buffer.
+ *
+ * @returns true if pages are contiguous, false otherwise.
+ * @param pPtr Information about a pointer HGCM parameter.
+ */
+DECLINLINE(bool) vmmdevHGCMGuestBufferIsContiguous(const VBOXHGCMPARMPTR *pPtr)
+{
+ if (pPtr->cPages == 1)
+ return true;
+ RTGCPHYS64 Phys = pPtr->paPages[0] + PAGE_SIZE;
+ if (Phys != pPtr->paPages[1])
+ return false;
+ if (pPtr->cPages > 2)
+ {
+ uint32_t iPage = 2;
+ do
+ {
+ Phys += PAGE_SIZE;
+ if (Phys != pPtr->paPages[iPage])
+ return false;
+ ++iPage;
+ } while (iPage < pPtr->cPages);
+ }
+ return true;
+}
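+
+/* A minimal equivalent formulation of the check above, for illustration only:
+ * the buffer is contiguous exactly when each page follows its predecessor by
+ * PAGE_SIZE, i.e. paPages[i] == paPages[0] + i * PAGE_SIZE for every page.
+ *
+ * @code
+ *      bool fContiguous = true;
+ *      for (uint32_t iPage = 1; iPage < pPtr->cPages && fContiguous; ++iPage)
+ *          fContiguous = pPtr->paPages[iPage] == pPtr->paPages[0] + iPage * PAGE_SIZE;
+ * @endcode
+ */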
+
+/** Copy data from guest memory to the host buffer.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance for PDMDevHlp.
+ * @param pvDst The destination host buffer.
+ * @param cbDst Size of the destination host buffer.
+ * @param pPtr Description of the source HGCM pointer parameter.
+ */
+static int vmmdevHGCMGuestBufferRead(PPDMDEVINSR3 pDevIns, void *pvDst, uint32_t cbDst,
+ const VBOXHGCMPARMPTR *pPtr)
+{
+ /*
+ * Try to detect contiguous buffers.
+ */
+ /** @todo We need a flag for indicating this. */
+ if (vmmdevHGCMGuestBufferIsContiguous(pPtr))
+ return PDMDevHlpPhysRead(pDevIns, pPtr->paPages[0] | pPtr->offFirstPage, pvDst, cbDst);
+
+ /*
+ * Page by page fallback.
+ */
+ uint8_t *pu8Dst = (uint8_t *)pvDst;
+ uint32_t offPage = pPtr->offFirstPage;
+ uint32_t cbRemaining = cbDst;
+
+ for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
+ {
+ uint32_t cbToRead = PAGE_SIZE - offPage;
+ if (cbToRead > cbRemaining)
+ cbToRead = cbRemaining;
+
+ /* Skip invalid pages. */
+ const RTGCPHYS GCPhys = pPtr->paPages[iPage];
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ int rc = PDMDevHlpPhysRead(pDevIns, GCPhys + offPage, pu8Dst, cbToRead);
+ AssertMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp offPage=%#x cbToRead=%#x\n", rc, GCPhys, offPage, cbToRead), rc);
+ }
+
+ offPage = 0; /* The next page is read from offset 0. */
+ cbRemaining -= cbToRead;
+ pu8Dst += cbToRead;
+ }
+
+ return VINF_SUCCESS;
+}
+
+/** Copy data from the host buffer to guest memory.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The device instance for PDMDevHlp.
+ * @param pPtr Description of the destination HGCM pointer parameter.
+ * @param pvSrc The source host buffer.
+ * @param cbSrc Size of the source host buffer.
+ */
+static int vmmdevHGCMGuestBufferWrite(PPDMDEVINSR3 pDevIns, const VBOXHGCMPARMPTR *pPtr,
+ const void *pvSrc, uint32_t cbSrc)
+{
+ int rc = VINF_SUCCESS;
+
+ uint8_t *pu8Src = (uint8_t *)pvSrc;
+ uint32_t offPage = pPtr->offFirstPage;
+ uint32_t cbRemaining = RT_MIN(cbSrc, pPtr->cbData);
+
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
+ {
+ uint32_t cbToWrite = PAGE_SIZE - offPage;
+ if (cbToWrite > cbRemaining)
+ cbToWrite = cbRemaining;
+
+ /* Skip invalid pages. */
+ const RTGCPHYS GCPhys = pPtr->paPages[iPage];
+ if (GCPhys != NIL_RTGCPHYS)
+ {
+ rc = PDMDevHlpPhysWrite(pDevIns, GCPhys + offPage, pu8Src, cbToWrite);
+ AssertRCBreak(rc);
+ }
+
+ offPage = 0; /* The next page is written at offset 0. */
+ cbRemaining -= cbToWrite;
+ pu8Src += cbToWrite;
+ }
+
+ return rc;
+}
+
+/** Initializes pCmd->paHostParms from already initialized pCmd->paGuestParms.
+ * Allocates memory for pointer parameters and copies data from the guest.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pCmd Command structure whose host parameters need initialization.
+ * @param pbReq The request buffer.
+ */
+static int vmmdevHGCMInitHostParameters(PVMMDEV pThis, PVBOXHGCMCMD pCmd, uint8_t const *pbReq)
+{
+ AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
+
+ for (uint32_t i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+
+ switch (pGuestParm->enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ {
+ pHostParm->type = VBOX_HGCM_SVC_PARM_32BIT;
+ pHostParm->u.uint32 = (uint32_t)pGuestParm->u.val.u64Value;
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_64bit:
+ {
+ pHostParm->type = VBOX_HGCM_SVC_PARM_64BIT;
+ pHostParm->u.uint64 = pGuestParm->u.val.u64Value;
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_LinAddr_In:
+ case VMMDevHGCMParmType_LinAddr_Out:
+ case VMMDevHGCMParmType_LinAddr:
+ case VMMDevHGCMParmType_PageList:
+ case VMMDevHGCMParmType_Embedded:
+ case VMMDevHGCMParmType_ContiguousPageList:
+ {
+ const uint32_t cbData = pGuestParm->u.ptr.cbData;
+
+ pHostParm->type = VBOX_HGCM_SVC_PARM_PTR;
+ pHostParm->u.pointer.size = cbData;
+
+ if (cbData)
+ {
+ /* Zero the memory; the buffer content is potentially copied back to the guest. */
+ void *pv = RTMemAllocZ(cbData);
+ AssertReturn(pv, VERR_NO_MEMORY);
+ pHostParm->u.pointer.addr = pv;
+
+ if (pGuestParm->u.ptr.fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
+ {
+ if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
+ {
+ if (pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList)
+ {
+ int rc = vmmdevHGCMGuestBufferRead(pThis->pDevInsR3, pv, cbData, &pGuestParm->u.ptr);
+ ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ else
+ {
+ int rc = PDMDevHlpPhysRead(pThis->pDevInsR3,
+ pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
+ pv, cbData);
+ ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ }
+ else
+ {
+ memcpy(pv, &pbReq[pGuestParm->u.ptr.offFirstPage], cbData);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+ }
+ }
+ }
+ else
+ {
+ pHostParm->u.pointer.addr = NULL;
+ }
+
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
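+
+/* Hypothetical sketch of how an HGCM service callback would consume the host
+ * parameters prepared above (paParms corresponds to pCmd->u.call.paHostParms;
+ * use32/use64/useBuf are placeholder helpers, not real APIs):
+ *
+ * @code
+ *      for (uint32_t i = 0; i < cParms; i++)
+ *          switch (paParms[i].type)
+ *          {
+ *              case VBOX_HGCM_SVC_PARM_32BIT: use32(paParms[i].u.uint32); break;
+ *              case VBOX_HGCM_SVC_PARM_64BIT: use64(paParms[i].u.uint64); break;
+ *              case VBOX_HGCM_SVC_PARM_PTR:   useBuf(paParms[i].u.pointer.addr,
+ *                                                    paParms[i].u.pointer.size); break;
+ *              default: break;
+ *          }
+ * @endcode
+ */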
+
+
+/** Allocate and initialize VBOXHGCMCMD structure for a HGCMCall request.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pHGCMCall The HGCMCall request (cached in host memory).
+ * @param cbHGCMCall Size of the request.
+ * @param GCPhys Guest physical address of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param ppCmd Where to store pointer to allocated command.
+ * @param pcbHGCMParmStruct Where to store size of used HGCM parameter structure.
+ */
+static int vmmdevHGCMCallAlloc(PVMMDEV pThis, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
+ VMMDevRequestType enmRequestType, PVBOXHGCMCMD *ppCmd, uint32_t *pcbHGCMParmStruct)
+{
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ const uint32_t cbHGCMParmStruct = enmRequestType == VMMDevReq_HGCMCall64 ? sizeof(HGCMFunctionParameter64)
+ : sizeof(HGCMFunctionParameter32);
+#else
+ const uint32_t cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
+#endif
+
+ const uint32_t cParms = pHGCMCall->cParms;
+
+ /* Check that there is enough space for the parameters and that the count is within a sane upper limit. */
+ ASSERT_GUEST_STMT_RETURN( cParms <= (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct
+ && cParms <= VMMDEV_MAX_HGCM_PARMS,
+ LogRelMax(50, ("VMMDev: request packet with invalid number of HGCM parameters: %d vs %d. Refusing operation.\n",
+ (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct, cParms)),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CALL, GCPhys, cbHGCMCall, cParms,
+ pHGCMCall->header.header.fRequestor);
+ if (pCmd == NULL)
+ return VERR_NO_MEMORY;
+
+ /* Request type has been validated in vmmdevReqDispatcher. */
+ pCmd->enmRequestType = enmRequestType;
+ pCmd->u.call.u32ClientID = pHGCMCall->u32ClientID;
+ pCmd->u.call.u32Function = pHGCMCall->u32Function;
+
+ *ppCmd = pCmd;
+ *pcbHGCMParmStruct = cbHGCMParmStruct;
+ return VINF_SUCCESS;
+}
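+
+/* Worked example of the parameter count check above: a 32-bit guest call with
+ * cbHGCMCall = sizeof(VMMDevHGCMCall) + 4 * sizeof(HGCMFunctionParameter32) may
+ * declare at most cParms = 4; a larger count, or anything above
+ * VMMDEV_MAX_HGCM_PARMS, is rejected with VERR_INVALID_PARAMETER before any
+ * parameter data is read.
+ */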
+
+/** Copy VMMDevHGCMCall request data from the guest to VBOXHGCMCMD command.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pCmd The destination command.
+ * @param pHGCMCall The HGCMCall request (cached in host memory).
+ * @param cbHGCMCall Size of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param cbHGCMParmStruct Size of used HGCM parameter structure.
+ */
+static int vmmdevHGCMCallFetchGuestParms(PVMMDEV pThis, PVBOXHGCMCMD pCmd,
+ const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
+ VMMDevRequestType enmRequestType, uint32_t cbHGCMParmStruct)
+{
+ /*
+ * Go over all guest parameters and initialize relevant VBOXHGCMCMD fields.
+ * VBOXHGCMCMD must contain all information about the request,
+ * as the request will not be read from guest memory again.
+ */
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ const bool f64Bits = (enmRequestType == VMMDevReq_HGCMCall64);
+#endif
+
+ const uint32_t cParms = pCmd->u.call.cParms;
+
+ /* Offsets in the request buffer to HGCM parameters and additional data. */
+ const uint32_t offHGCMParms = sizeof(VMMDevHGCMCall);
+ const uint32_t offExtra = offHGCMParms + cParms * cbHGCMParmStruct;
+
+ /* Pointer to the next HGCM parameter of the request. */
+ const uint8_t *pu8HGCMParm = (uint8_t *)pHGCMCall + offHGCMParms;
+
+ uint32_t cbTotalData = 0;
+ for (uint32_t i = 0; i < cParms; ++i, pu8HGCMParm += cbHGCMParmStruct)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, type, HGCMFunctionParameter32, type);
+ pGuestParm->enmType = ((HGCMFunctionParameter64 *)pu8HGCMParm)->type;
+#else
+ pGuestParm->enmType = ((HGCMFunctionParameter *)pu8HGCMParm)->type;
+#endif
+
+ switch (pGuestParm->enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value32, HGCMFunctionParameter32, u.value32);
+ uint32_t *pu32 = &((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value32;
+#else
+ uint32_t *pu32 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value32;
+#endif
+ LogFunc(("uint32 guest parameter %RI32\n", *pu32));
+
+ pGuestParm->u.val.u64Value = *pu32;
+ pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu32 - (uintptr_t)pHGCMCall);
+ pGuestParm->u.val.cbValue = sizeof(uint32_t);
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_64bit:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value64, HGCMFunctionParameter32, u.value64);
+ uint64_t *pu64 = (uint64_t *)(uintptr_t)&((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value64; /* MSC detect misalignment, thus casts. */
+#else
+ uint64_t *pu64 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value64;
+#endif
+ LogFunc(("uint64 guest parameter %RI64\n", *pu64));
+
+ pGuestParm->u.val.u64Value = *pu64;
+ pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu64 - (uintptr_t)pHGCMCall);
+ pGuestParm->u.val.cbValue = sizeof(uint64_t);
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_LinAddr_In: /* In (read) */
+ case VMMDevHGCMParmType_LinAddr_Out: /* Out (write) */
+ case VMMDevHGCMParmType_LinAddr: /* In & Out */
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ uint32_t cbData = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.size
+ : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.size;
+ RTGCPTR GCPtr = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.u.linearAddr
+ : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.u.linearAddr;
+#else
+ uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.size;
+ RTGCPTR GCPtr = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.u.linearAddr;
+#endif
+ LogFunc(("LinAddr guest parameter %RGv, cb %u\n", GCPtr, cbData));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
+ cbTotalData += cbData;
+
+ const uint32_t offFirstPage = cbData > 0 ? GCPtr & PAGE_OFFSET_MASK : 0;
+ const uint32_t cPages = cbData > 0 ? (offFirstPage + cbData + PAGE_SIZE - 1) / PAGE_SIZE : 0;
+
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = offFirstPage;
+ pGuestParm->u.ptr.cPages = cPages;
+ pGuestParm->u.ptr.fu32Direction = vmmdevHGCMParmTypeToDirection(pGuestParm->enmType);
+
+ if (cbData > 0)
+ {
+ if (cPages == 1)
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
+ else
+ {
+ pGuestParm->u.ptr.paPages = (RTGCPHYS *)RTMemAlloc(cPages * sizeof(RTGCPHYS));
+ AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
+ }
+
+ /* Convert the guest linear page pointers to physical addresses. */
+ GCPtr &= PAGE_BASE_GC_MASK;
+ for (uint32_t iPage = 0; iPage < cPages; ++iPage)
+ {
+ /* The guest might specify an invalid GCPtr; just skip such addresses.
+ * Also, if the guest parameters are fetched while restoring an old saved state,
+ * GCPtr may have become invalid and no longer have a corresponding GCPhys.
+ * The command restoration routine will take care of this.
+ */
+ RTGCPHYS GCPhys;
+ int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pThis->pDevInsR3, GCPtr, &GCPhys);
+ if (RT_FAILURE(rc2))
+ GCPhys = NIL_RTGCPHYS;
+ LogFunc(("Page %d: %RGv -> %RGp. %Rrc\n", iPage, GCPtr, GCPhys, rc2));
+
+ pGuestParm->u.ptr.paPages[iPage] = GCPhys;
+ GCPtr += PAGE_SIZE;
+ }
+ }
+
+ break;
+ }
+
+ case VMMDevHGCMParmType_PageList:
+ case VMMDevHGCMParmType_ContiguousPageList:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.offset, HGCMFunctionParameter32, u.PageList.offset);
+ uint32_t cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.size;
+ uint32_t offPageListInfo = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.offset;
+#else
+ uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.size;
+ uint32_t offPageListInfo = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.offset;
+#endif
+ LogFunc(("PageList guest parameter cb %u, offset %u\n", cbData, offPageListInfo));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
+ cbTotalData += cbData;
+
+/** @todo respect zero byte page lists... */
+ /* Check that the page list info is within the request. */
+ ASSERT_GUEST_RETURN( offPageListInfo >= offExtra
+ && cbHGCMCall >= sizeof(HGCMPageListInfo)
+ && offPageListInfo <= cbHGCMCall - sizeof(HGCMPageListInfo),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /* The HGCMPageListInfo structure is within the request. */
+ const HGCMPageListInfo *pPageListInfo = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offPageListInfo);
+
+ /* Enough space for page pointers? */
+ const uint32_t cMaxPages = 1 + (cbHGCMCall - offPageListInfo - sizeof(HGCMPageListInfo)) / sizeof(RTGCPHYS);
+ ASSERT_GUEST_RETURN( pPageListInfo->cPages > 0
+ && pPageListInfo->cPages <= cMaxPages,
+ VERR_INVALID_PARAMETER);
+
+ /* Contiguous page lists only ever have a single page. */
+ ASSERT_GUEST_RETURN( pPageListInfo->cPages == 1
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList, VERR_INVALID_PARAMETER);
+
+ /* Other fields of PageListInfo. */
+ ASSERT_GUEST_RETURN( (pPageListInfo->flags & ~VBOX_HGCM_F_PARM_DIRECTION_BOTH) == 0
+ && pPageListInfo->offFirstPage < PAGE_SIZE,
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /* We do not check that cbData fits into the pages, because the host code never
+ * accesses more than the provided number of pages.
+ */
+
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = pPageListInfo->offFirstPage;
+ pGuestParm->u.ptr.cPages = pPageListInfo->cPages;
+ pGuestParm->u.ptr.fu32Direction = pPageListInfo->flags;
+ if (pPageListInfo->cPages == 1)
+ {
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
+ pGuestParm->u.ptr.GCPhysSinglePage = pPageListInfo->aPages[0];
+ }
+ else
+ {
+ pGuestParm->u.ptr.paPages = (RTGCPHYS *)RTMemAlloc(pPageListInfo->cPages * sizeof(RTGCPHYS));
+ AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
+
+ for (uint32_t iPage = 0; iPage < pGuestParm->u.ptr.cPages; ++iPage)
+ pGuestParm->u.ptr.paPages[iPage] = pPageListInfo->aPages[iPage];
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_Embedded:
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
+ uint32_t const cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.cbData;
+ uint32_t const offData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.offData;
+ uint32_t const fFlags = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.fFlags;
+#else
+ uint32_t const cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.cbData;
+ uint32_t const offData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.offData;
+ uint32_t const fFlags = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.fFlags;
+#endif
+ LogFunc(("Embedded guest parameter cb %u, offset %u, flags %#x\n", cbData, offData, fFlags));
+
+ ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
+ cbTotalData += cbData;
+
+ /* Check flags and buffer range. */
+ ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(fFlags), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
+ ASSERT_GUEST_MSG_RETURN( offData >= offExtra
+ && offData <= cbHGCMCall
+ && cbData <= cbHGCMCall - offData,
+ ("offData=%#x cbData=%#x cbHGCMCall=%#x offExtra=%#x\n", offData, cbData, cbHGCMCall, offExtra),
+ VERR_INVALID_PARAMETER);
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /* We use part of the ptr member. */
+ pGuestParm->u.ptr.fu32Direction = fFlags;
+ pGuestParm->u.ptr.cbData = cbData;
+ pGuestParm->u.ptr.offFirstPage = offData;
+ pGuestParm->u.ptr.GCPhysSinglePage = pCmd->GCPhys + offData;
+ pGuestParm->u.ptr.cPages = 1;
+ pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
+ }
+ }
+
+ return VINF_SUCCESS;
+}
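+
+/* Illustrative sketch (comment only) of the guest request layout expected by the
+ * fetcher above; the offsets correspond to offHGCMParms and offExtra:
+ *
+ * @code
+ *      0                                       VMMDevHGCMCall header
+ *      sizeof(VMMDevHGCMCall)                  HGCMFunctionParameter32/64[cParms]
+ *      offExtra = sizeof(VMMDevHGCMCall)
+ *               + cParms * cbHGCMParmStruct    additional data: HGCMPageListInfo
+ *                                              structures and embedded buffers
+ * @endcode
+ *
+ * Every PageList parameter's 'offset' and every Embedded parameter's 'offData'
+ * must point into the tail region at or after offExtra and stay within
+ * cbHGCMCall; that is what the ASSERT_GUEST checks above enforce.
+ */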
+
+/**
+ * Handles VMMDevHGCMCall request.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pHGCMCall The request to handle (cached in host memory).
+ * @param cbHGCMCall Size of the entire request (including HGCM parameters).
+ * @param GCPhys The guest physical address of the request.
+ * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
+ * @param tsArrival The STAM_GET_TS() value when the request arrived.
+ * @param ppLock Pointer to the lock info pointer (latter can be
+ * NULL). Set to NULL if HGCM takes lock ownership.
+ */
+int vmmdevHGCMCall(PVMMDEV pThis, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
+ VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
+{
+ LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d, fRequestor = %#x\n", pHGCMCall->u32ClientID,
+ pHGCMCall->u32Function, pHGCMCall->cParms, enmRequestType, pHGCMCall->header.header.fRequestor));
+
+ /*
+ * Validation.
+ */
+ ASSERT_GUEST_RETURN(cbHGCMCall >= sizeof(VMMDevHGCMCall), VERR_INVALID_PARAMETER);
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ ASSERT_GUEST_RETURN( enmRequestType == VMMDevReq_HGCMCall32
+ || enmRequestType == VMMDevReq_HGCMCall64, VERR_INVALID_PARAMETER);
+#else
+ ASSERT_GUEST_RETURN(enmRequestType == VMMDevReq_HGCMCall, VERR_INVALID_PARAMETER);
+#endif
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Create a command structure.
+ */
+ PVBOXHGCMCMD pCmd;
+ uint32_t cbHGCMParmStruct;
+ int rc = vmmdevHGCMCallAlloc(pThis, pHGCMCall, cbHGCMCall, GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ pCmd->tsArrival = tsArrival;
+ PVMMDEVREQLOCK pLock = *ppLock;
+ if (pLock)
+ {
+ pCmd->ReqMapLock = pLock->Lock;
+ pCmd->pvReqLocked = pLock->pvReq;
+ *ppLock = NULL;
+ }
+
+ rc = vmmdevHGCMCallFetchGuestParms(pThis, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ /* Copy guest data to host parameters, so HGCM services can use the data. */
+ rc = vmmdevHGCMInitHostParameters(pThis, pCmd, (uint8_t const *)pHGCMCall);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Pass the function call to HGCM connector for actual processing
+ */
+ vmmdevHGCMAddCommand(pThis, pCmd);
+
+#if 0 /* DONT ENABLE - for performance hacking. */
+ if ( pCmd->u.call.u32Function == 9
+ && pCmd->u.call.cParms == 5)
+ {
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+
+ if (pCmd->pvReqLocked)
+ {
+ VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
+ pHeader->header.rc = VINF_SUCCESS;
+ pHeader->result = VINF_SUCCESS;
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+ }
+ else
+ {
+ VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)pHGCMCall;
+ pHeader->header.rc = VINF_SUCCESS;
+ pHeader->result = VINF_SUCCESS;
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+ PDMDevHlpPhysWrite(pThis->pDevInsR3, GCPhys, pHeader, sizeof(*pHeader));
+ }
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ return VINF_HGCM_ASYNC_EXECUTE; /* ignored, but avoids assertions. */
+ }
+#endif
+
+ rc = pThis->pHGCMDrv->pfnCall(pThis->pHGCMDrv, pCmd,
+ pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
+ pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsArrival);
+
+ if (rc == VINF_HGCM_ASYNC_EXECUTE)
+ {
+ /*
+ * Done. Just update statistics and return.
+ */
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdArrival, tsNow - tsArrival);
+#endif
+ return rc;
+ }
+
+ /*
+ * Failed, bail out.
+ */
+ LogFunc(("pfnCall rc = %Rrc\n", rc));
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ }
+ }
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+ return rc;
+}
+
+/**
+ * VMMDevReq_HGCMCancel worker.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pHGCMCancel The request to handle (cached in host memory).
+ * @param GCPhys The address of the request.
+ *
+ * @thread EMT
+ */
+int vmmdevHGCMCancel(PVMMDEV pThis, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys)
+{
+ NOREF(pHGCMCancel);
+ int rc = vmmdevHGCMCancel2(pThis, GCPhys);
+ return rc == VERR_NOT_FOUND ? VERR_INVALID_PARAMETER : rc;
+}
+
+/**
+ * VMMDevReq_HGCMCancel2 worker.
+ *
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_NOT_FOUND if the request was not found.
+ * @retval VERR_INVALID_PARAMETER if the request address is invalid.
+ *
+ * @param pThis The VMMDev instance data.
+ * @param GCPhys The address of the request that should be cancelled.
+ *
+ * @thread EMT
+ */
+int vmmdevHGCMCancel2(PVMMDEV pThis, RTGCPHYS GCPhys)
+{
+ if ( GCPhys == 0
+ || GCPhys == NIL_RTGCPHYS
+ || GCPhys == NIL_RTGCPHYS32)
+ {
+ Log(("vmmdevHGCMCancel2: GCPhys=%#x\n", GCPhys));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Locate the command and cancel it while under the protection of
+ * the lock. hgcmCompletedWorker makes assumptions about this.
+ */
+ int rc = vmmdevHGCMCmdListLock(pThis);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMFindCommandLocked(pThis, GCPhys);
+ if (pCmd)
+ {
+ pCmd->fCancelled = true;
+
+ Log(("vmmdevHGCMCancel2: Cancelled pCmd=%p / GCPhys=%#x\n", pCmd, GCPhys));
+ if (pThis->pHGCMDrv)
+ pThis->pHGCMDrv->pfnCancelled(pThis->pHGCMDrv, pCmd,
+ pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.u32ClientID
+ : pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? pCmd->u.connect.u32ClientID
+ : pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT ? pCmd->u.disconnect.u32ClientID
+ : 0);
+ }
+ else
+ rc = VERR_NOT_FOUND;
+
+ vmmdevHGCMCmdListUnlock(pThis);
+ return rc;
+}
+
+/** Write HGCM call parameters and buffers back to the guest request and memory.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pCmd Completed call command.
+ * @param pHGCMCall The guest request which needs updating (cached in host memory).
+ * @param pbReq The request copy or locked memory for handling
+ * embedded buffers.
+ */
+static int vmmdevHGCMCompleteCallRequest(PVMMDEV pThis, PVBOXHGCMCMD pCmd, VMMDevHGCMCall *pHGCMCall, uint8_t *pbReq)
+{
+ AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
+
+ /*
+ * Go over parameter descriptions saved in pCmd.
+ */
+ uint32_t i;
+ for (i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+ VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
+
+ const HGCMFunctionParameterType enmType = pGuestParm->enmType;
+ switch (enmType)
+ {
+ case VMMDevHGCMParmType_32bit:
+ case VMMDevHGCMParmType_64bit:
+ {
+ const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ const void *pvSrc = enmType == VMMDevHGCMParmType_32bit ? (void *)&pHostParm->u.uint32
+ : (void *)&pHostParm->u.uint64;
+ memcpy((uint8_t *)pHGCMCall + pVal->offValue, pvSrc, pVal->cbValue);
+ break;
+ }
+
+ case VMMDevHGCMParmType_LinAddr_In:
+ case VMMDevHGCMParmType_LinAddr_Out:
+ case VMMDevHGCMParmType_LinAddr:
+ case VMMDevHGCMParmType_PageList:
+ {
+/** @todo Update the return buffer size. */
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ int rc = vmmdevHGCMGuestBufferWrite(pThis->pDevInsR3, pPtr, pvSrc, cbSrc);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_Embedded:
+ {
+/** @todo Update the return buffer size! */
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
+ memcpy(pbReq + pPtr->offFirstPage, pvSrc, cbToCopy);
+ }
+ break;
+ }
+
+ case VMMDevHGCMParmType_ContiguousPageList:
+ {
+/** @todo Update the return buffer size. */
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ if ( pPtr->cbData > 0
+ && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
+ {
+ const void *pvSrc = pHostParm->u.pointer.addr;
+ uint32_t cbSrc = pHostParm->u.pointer.size;
+ uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
+ int rc = PDMDevHlpPhysWrite(pThis->pDevInsR3, pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
+ pvSrc, cbToCopy);
+ if (RT_FAILURE(rc))
+ break;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return VINF_SUCCESS;
+}
+
+/** Update HGCM request in the guest memory and mark it as completed.
+ *
+ * @returns VINF_SUCCESS or VERR_CANCELLED.
+ * @param pInterface Pointer to this PDM interface.
+ * @param result HGCM completion status code (VBox status code).
+ * @param pCmd Completed command, which contains updated host parameters.
+ *
+ * @thread EMT
+ */
+static int hgcmCompletedWorker(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
+#ifdef VBOX_WITH_DTRACE
+ uint32_t idFunction = 0;
+ uint32_t idClient = 0;
+#endif
+
+ if (result == VINF_HGCM_SAVE_STATE)
+ {
+ /* If the completion routine was called while the HGCM service is saving its state,
+ * there is currently nothing to be done here. The pCmd stays in the list, will be
+ * saved later when the VMMDev state is saved, and will be re-submitted on load.
+ *
+ * It is assumed that VMMDev saves its state after the HGCM services (the VMMDev
+ * driver is attached by the constructor before it registers its SSM state), and,
+ * therefore, VBOXHGCMCMD structures are not removed from the list by
+ * vmmdevHGCMSaveState while HGCM uses them.
+ */
+ LogFlowFunc(("VINF_HGCM_SAVE_STATE for command %p\n", pCmd));
+ return VINF_SUCCESS;
+ }
+
+ VBOXDD_HGCMCALL_COMPLETED_EMT(pCmd, result);
+
+ int rc = VINF_SUCCESS;
+
+ /*
+ * The cancellation protocol requires us to remove the command here
+ * and then check the flag. Cancelled commands must not be written
+ * back to guest memory.
+ */
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+
+ if (RT_LIKELY(!pCmd->fCancelled))
+ {
+ if (!pCmd->pvReqLocked)
+ {
+ /*
+ * Request is not locked:
+ */
+ VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
+ if (pHeader)
+ {
+ /*
+ * Read the request from the guest memory for updating.
+ * The request data is not used for anything except checking the request type.
+ */
+ PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+
+ /* Verify the request type. This is the only field which is used from the guest memory. */
+ const VMMDevRequestType enmRequestType = pHeader->header.requestType;
+ if ( enmRequestType == pCmd->enmRequestType
+ || enmRequestType == VMMDevReq_HGCMCancel)
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Update parameters and data buffers.
+ */
+ switch (enmRequestType)
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+ case VMMDevReq_HGCMCall32:
+#else
+ case VMMDevReq_HGCMCall:
+#endif
+ {
+ VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
+ rc = vmmdevHGCMCompleteCallRequest(pThis, pCmd, pHGCMCall, (uint8_t *)pHeader);
+#ifdef VBOX_WITH_DTRACE
+ idFunction = pCmd->u.call.u32Function;
+ idClient = pCmd->u.call.u32ClientID;
+#endif
+ break;
+ }
+
+ case VMMDevReq_HGCMConnect:
+ {
+ /* save the client id in the guest request packet */
+ VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
+ pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
+ break;
+ }
+
+ default:
+ /* make compiler happy */
+ break;
+ }
+ }
+ else
+ {
+ /* Guest has changed the command type. */
+ LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
+ pCmd->enmCmdType, pHeader->header.requestType));
+
+ ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
+ }
+
+ /* Setup return code for the guest. */
+ if (RT_SUCCESS(rc))
+ pHeader->result = result;
+ else
+ pHeader->result = rc;
+
+ /* First write back the request. */
+ PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+
+ /* Mark request as processed. */
+ pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+
+ /* Second write the flags to mark the request as processed. */
+ PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
+ &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
+
+ /* Now that the command has been removed from the internal list, notify the guest. */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
+
+ RTMemFree(pHeader);
+ }
+ else
+ {
+ LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
+ }
+ }
+ /*
+ * Request was locked:
+ */
+ else
+ {
+ VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
+
+ /* Verify the request type. This is the only field which is used from the guest memory. */
+ const VMMDevRequestType enmRequestType = pHeader->header.requestType;
+ if ( enmRequestType == pCmd->enmRequestType
+ || enmRequestType == VMMDevReq_HGCMCancel)
+ {
+ RT_UNTRUSTED_VALIDATED_FENCE();
+
+ /*
+ * Update parameters and data buffers.
+ */
+ switch (enmRequestType)
+ {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall64:
+ case VMMDevReq_HGCMCall32:
+#else
+ case VMMDevReq_HGCMCall:
+#endif
+ {
+ VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
+ rc = vmmdevHGCMCompleteCallRequest(pThis, pCmd, pHGCMCall, (uint8_t *)pHeader);
+#ifdef VBOX_WITH_DTRACE
+ idFunction = pCmd->u.call.u32Function;
+ idClient = pCmd->u.call.u32ClientID;
+#endif
+ break;
+ }
+
+ case VMMDevReq_HGCMConnect:
+ {
+ /* save the client id in the guest request packet */
+ VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
+ pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
+ break;
+ }
+
+ default:
+ /* make compiler happy */
+ break;
+ }
+ }
+ else
+ {
+ /* Guest has changed the command type. */
+ LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
+ pCmd->enmCmdType, pHeader->header.requestType));
+
+ ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
+ }
+
+ /* Setup return code for the guest. */
+ if (RT_SUCCESS(rc))
+ pHeader->result = result;
+ else
+ pHeader->result = rc;
+
+ /* Mark request as processed. */
+ ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
+
+ /* Now that the command has been removed from the internal list, notify the guest. */
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
+ }
+
+ /* Set the status to success for now, though we might consider passing
+ along the vmmdevHGCMCompleteCallRequest errors... */
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ LogFlowFunc(("Cancelled command %p\n", pCmd));
+ rc = VERR_CANCELLED;
+ }
+
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ /* Save for final stats. */
+ uint64_t const tsArrival = pCmd->tsArrival;
+ uint64_t const tsComplete = pCmd->tsComplete;
+#endif
+
+ /* Deallocate the command memory. */
+ VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
+ vmmdevHGCMCmdFree(pThis, pCmd);
+
+#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
+ /* Update stats. */
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdCompletion, tsNow - tsComplete);
+ if (tsArrival != 0)
+ STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdTotal, tsNow - tsArrival);
+#endif
+
+ return rc;
+}
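+
+/* A minimal sketch of the guest side of the completion protocol implemented above
+ * (guest code is not part of this file; pReq is a hypothetical pointer to the
+ * guest's copy of the request header):
+ *
+ * @code
+ *      // After receiving VMMDEV_EVENT_HGCM the guest checks its pending request:
+ *      if (ASMAtomicReadU32(&pReq->fu32Flags) & VBOX_HGCM_REQ_DONE)
+ *          rc = pReq->result;  // parameters and buffers have already been written back
+ * @endcode
+ *
+ * The host writes the request body first and sets VBOX_HGCM_REQ_DONE last (the two
+ * PDMDevHlpPhysWrite calls and ASMAtomicOrU32 above), so the guest never observes
+ * the DONE flag together with stale data.
+ */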
+
+/**
+ * HGCM callback for request completion. Forwards to hgcmCompletedWorker.
+ *
+ * @returns VINF_SUCCESS or VERR_CANCELLED.
+ * @param pInterface Pointer to this PDM interface.
+ * @param result HGCM completion status code (VBox status code).
+ * @param pCmd Completed command, which contains updated host parameters.
+ */
+DECLCALLBACK(int) hgcmCompleted(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
+{
+#if 0 /* This seems to be significantly slower. Half of MsgTotal time seems to be spent here. */
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
+ STAM_GET_TS(pCmd->tsComplete);
+
+ VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
+
+/** @todo no longer necessary to forward to EMT, but it might be more
+ * efficient...? */
+ /* Not safe to execute asynchronously; forward to EMT */
+ int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pThis->pDevInsR3), VMCPUID_ANY,
+ (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
+ AssertRC(rc);
+ return VINF_SUCCESS; /* cannot tell if canceled or not... */
+#else
+ STAM_GET_TS(pCmd->tsComplete);
+ VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
+ return hgcmCompletedWorker(pInterface, result, pCmd);
+#endif
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdRestored}
+ */
+DECLCALLBACK(bool) hgcmIsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ RT_NOREF(pInterface);
+ return pCmd && pCmd->fRestored;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdCancelled}
+ */
+DECLCALLBACK(bool) hgcmIsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ RT_NOREF(pInterface);
+ return pCmd && pCmd->fCancelled;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnGetRequestor}
+ */
+DECLCALLBACK(uint32_t) hgcmGetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
+ AssertPtrReturn(pCmd, VMMDEV_REQUESTOR_LOWEST);
+ if (pThis->guestInfo2.fFeatures & VBOXGSTINFO2_F_REQUESTOR_INFO)
+ return pCmd->fRequestor;
+ return VMMDEV_REQUESTOR_LEGACY;
+}
+
+/**
+ * @interface_method_impl{PDMIHGCMPORT,pfnGetVMMDevSessionId}
+ */
+DECLCALLBACK(uint64_t) hgcmGetVMMDevSessionId(PPDMIHGCMPORT pInterface)
+{
+ PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
+ return pThis->idSession;
+}
+
+/** Save information about pending HGCM requests from pThis->listHGCMCmd.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pSSM SSM handle for SSM functions.
+ *
+ * @thread EMT
+ */
+int vmmdevHGCMSaveState(PVMMDEV pThis, PSSMHANDLE pSSM)
+{
+ LogFlowFunc(("\n"));
+
+ /* Compute how many commands are pending. */
+ uint32_t cCmds = 0;
+ PVBOXHGCMCMD pCmd;
+ RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ LogFlowFunc(("pCmd %p\n", pCmd));
+ ++cCmds;
+ }
+ LogFlowFunc(("cCmds = %d\n", cCmds));
+
+ /* Save number of commands. */
+ int rc = SSMR3PutU32(pSSM, cCmds);
+ AssertRCReturn(rc, rc);
+
+ if (cCmds > 0)
+ {
+ RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
+ {
+ LogFlowFunc(("Saving %RGp, size %d\n", pCmd->GCPhys, pCmd->cbRequest));
+
+ /** @todo Don't save cancelled requests! It serves no purpose. See restore and
+ * @bugref{4032#c4} for details. */
+ SSMR3PutU32 (pSSM, (uint32_t)pCmd->enmCmdType);
+ SSMR3PutBool (pSSM, pCmd->fCancelled);
+ SSMR3PutGCPhys (pSSM, pCmd->GCPhys);
+ SSMR3PutU32 (pSSM, pCmd->cbRequest);
+ SSMR3PutU32 (pSSM, (uint32_t)pCmd->enmRequestType);
+ const uint32_t cParms = pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.cParms : 0;
+ rc = SSMR3PutU32(pSSM, cParms);
+ AssertRCReturn(rc, rc);
+
+ if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ SSMR3PutU32 (pSSM, pCmd->u.call.u32ClientID);
+ rc = SSMR3PutU32(pSSM, pCmd->u.call.u32Function);
+ AssertRCReturn(rc, rc);
+
+ /* Guest parameters. */
+ uint32_t i;
+ for (i = 0; i < pCmd->u.call.cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ rc = SSMR3PutU32(pSSM, (uint32_t)pGuestParm->enmType);
+ AssertRCReturn(rc, rc);
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
+ || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
+ {
+ const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ SSMR3PutU64 (pSSM, pVal->u64Value);
+ SSMR3PutU32 (pSSM, pVal->offValue);
+ rc = SSMR3PutU32(pSSM, pVal->cbValue);
+ }
+ else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ {
+ const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ SSMR3PutU32 (pSSM, pPtr->cbData);
+ SSMR3PutU32 (pSSM, pPtr->offFirstPage);
+ SSMR3PutU32 (pSSM, pPtr->cPages);
+ rc = SSMR3PutU32(pSSM, pPtr->fu32Direction);
+
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages; ++iPage)
+ rc = SSMR3PutGCPhys(pSSM, pPtr->paPages[iPage]);
+ }
+ else
+ {
+ AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
+ }
+ AssertRCReturn(rc, rc);
+ }
+ }
+ else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ {
+ SSMR3PutU32(pSSM, pCmd->u.connect.u32ClientID);
+ SSMR3PutMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
+ }
+ else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
+ {
+ SSMR3PutU32(pSSM, pCmd->u.disconnect.u32ClientID);
+ }
+ else
+ {
+ AssertFailedReturn(VERR_INTERNAL_ERROR);
+ }
+
+ /* A reserved field which allows extending the saved data for a command. */
+ rc = SSMR3PutU32(pSSM, 0);
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+ /* A reserved field which allows extending the saved data for VMMDevHGCM. */
+ rc = SSMR3PutU32(pSSM, 0);
+ AssertRCReturn(rc, rc);
+
+ return rc;
+}
+
+/** Load information about pending HGCM requests.
+ *
+ * Allocate VBOXHGCMCMD commands and add them to pThis->listHGCMCmd temporarily.
+ * vmmdevHGCMLoadStateDone will process the temporary list. This includes
+ * loading the correct fRequestor fields.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param pSSM SSM handle for SSM functions.
+ * @param uVersion Saved state version.
+ *
+ * @thread EMT
+ */
+int vmmdevHGCMLoadState(PVMMDEV pThis, PSSMHANDLE pSSM, uint32_t uVersion)
+{
+ LogFlowFunc(("\n"));
+
+ pThis->u32SSMVersion = uVersion; /* For vmmdevHGCMLoadStateDone */
+
+ /* Read how many commands were pending. */
+ uint32_t cCmds = 0;
+ int rc = SSMR3GetU32(pSSM, &cCmds);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("cCmds = %d\n", cCmds));
+
+ if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
+ {
+ /* Saved information about all HGCM parameters. */
+ uint32_t u32;
+
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ /* Command fields. */
+ VBOXHGCMCMDTYPE enmCmdType;
+ bool fCancelled;
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+ VMMDevRequestType enmRequestType;
+ uint32_t cParms;
+
+ SSMR3GetU32 (pSSM, &u32);
+ enmCmdType = (VBOXHGCMCMDTYPE)u32;
+ SSMR3GetBool (pSSM, &fCancelled);
+ SSMR3GetGCPhys (pSSM, &GCPhys);
+ SSMR3GetU32 (pSSM, &cbRequest);
+ SSMR3GetU32 (pSSM, &u32);
+ enmRequestType = (VMMDevRequestType)u32;
+ rc = SSMR3GetU32(pSSM, &cParms);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, enmCmdType, GCPhys, cbRequest, cParms, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ pCmd->fCancelled = fCancelled;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+ pCmd->enmRequestType = enmRequestType;
+
+ if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
+ {
+ SSMR3GetU32 (pSSM, &pCmd->u.call.u32ClientID);
+ rc = SSMR3GetU32(pSSM, &pCmd->u.call.u32Function);
+ AssertRCReturn(rc, rc);
+
+ /* Guest parameters. */
+ uint32_t i;
+ for (i = 0; i < cParms; ++i)
+ {
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
+
+ rc = SSMR3GetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ pGuestParm->enmType = (HGCMFunctionParameterType)u32;
+
+ if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
+ || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
+ {
+ VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
+ SSMR3GetU64 (pSSM, &pVal->u64Value);
+ SSMR3GetU32 (pSSM, &pVal->offValue);
+ rc = SSMR3GetU32(pSSM, &pVal->cbValue);
+ }
+ else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
+ || pGuestParm->enmType == VMMDevHGCMParmType_PageList
+ || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
+ || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
+ {
+ VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
+ SSMR3GetU32 (pSSM, &pPtr->cbData);
+ SSMR3GetU32 (pSSM, &pPtr->offFirstPage);
+ SSMR3GetU32 (pSSM, &pPtr->cPages);
+ rc = SSMR3GetU32(pSSM, &pPtr->fu32Direction);
+ if (RT_SUCCESS(rc))
+ {
+ if (pPtr->cPages == 1)
+ pPtr->paPages = &pPtr->GCPhysSinglePage;
+ else
+ {
+ AssertReturn( pGuestParm->enmType != VMMDevHGCMParmType_Embedded
+ && pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList, VERR_INTERNAL_ERROR_3);
+ pPtr->paPages = (RTGCPHYS *)RTMemAlloc(pPtr->cPages * sizeof(RTGCPHYS));
+ AssertStmt(pPtr->paPages, rc = VERR_NO_MEMORY);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages; ++iPage)
+ rc = SSMR3GetGCPhys(pSSM, &pPtr->paPages[iPage]);
+ }
+ }
+ }
+ else
+ {
+ AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
+ }
+ AssertRCReturn(rc, rc);
+ }
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
+ {
+ SSMR3GetU32(pSSM, &pCmd->u.connect.u32ClientID);
+ rc = SSMR3GetMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
+ AssertRCReturn(rc, rc);
+ }
+ else if (enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
+ {
+ rc = SSMR3GetU32(pSSM, &pCmd->u.disconnect.u32ClientID);
+ AssertRCReturn(rc, rc);
+ }
+ else
+ {
+ AssertFailedReturn(VERR_INTERNAL_ERROR);
+ }
+
+ /* A reserved field which allows extending the saved data for a command. */
+ rc = SSMR3GetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+
+ /*
+ * Do not restore cancelled calls. Why do we save them to start with?
+ *
+ * The guest memory no longer contains a valid request! So, it is not
+ * possible to restore it. The memory is often reused for a new request
+ * by now and we will end up trying to complete that more than once if
+ * we restore a cancelled call. In some cases VERR_HGCM_INVALID_CLIENT_ID
+ * is returned, though it might just be silent memory corruption.
+ */
+ /* See current version above. */
+ if (!fCancelled)
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ else
+ {
+ Log(("vmmdevHGCMLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
+ enmCmdType, GCPhys, cbRequest));
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+ }
+
+ /* A reserved field which allows extending the saved data for VMMDevHGCM. */
+ rc = SSMR3GetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ }
+ else if (uVersion >= 9)
+ {
+ /* Version 9+: Load information about commands. Pre-rewrite. */
+ uint32_t u32;
+
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ VBOXHGCMCMDTYPE enmCmdType;
+ bool fCancelled;
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+ uint32_t cLinAddrs;
+
+ SSMR3GetGCPhys (pSSM, &GCPhys);
+ rc = SSMR3GetU32(pSSM, &cbRequest);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
+
+ /* For uVersion <= 12, this was the size of the entire command.
+ * Now the command is reconstructed in vmmdevHGCMLoadStateDone.
+ */
+ if (uVersion <= 12)
+ SSMR3Skip(pSSM, sizeof (uint32_t));
+
+ SSMR3GetU32 (pSSM, &u32);
+ enmCmdType = (VBOXHGCMCMDTYPE)u32;
+ SSMR3GetBool (pSSM, &fCancelled);
+ /* How many linear pointers. Always 0 if not a call command. */
+ rc = SSMR3GetU32(pSSM, &cLinAddrs);
+ AssertRCReturn(rc, rc);
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, enmCmdType, GCPhys, cbRequest, cLinAddrs, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ pCmd->fCancelled = fCancelled;
+ pCmd->GCPhys = GCPhys;
+ pCmd->cbRequest = cbRequest;
+
+ if (cLinAddrs > 0)
+ {
+ /* Skip number of pages for all LinAddrs in this command. */
+ SSMR3Skip(pSSM, sizeof(uint32_t));
+
+ uint32_t i;
+ for (i = 0; i < cLinAddrs; ++i)
+ {
+ VBOXHGCMPARMPTR * const pPtr = &pCmd->u.call.paGuestParms[i].u.ptr;
+
+ /* Index of the parameter. Use cbData field to store the index. */
+ SSMR3GetU32 (pSSM, &pPtr->cbData);
+ SSMR3GetU32 (pSSM, &pPtr->offFirstPage);
+ rc = SSMR3GetU32(pSSM, &pPtr->cPages);
+ AssertRCReturn(rc, rc);
+
+ pPtr->paPages = (RTGCPHYS *)RTMemAlloc(pPtr->cPages * sizeof(RTGCPHYS));
+ AssertReturn(pPtr->paPages, VERR_NO_MEMORY);
+
+ uint32_t iPage;
+ for (iPage = 0; iPage < pPtr->cPages; ++iPage)
+ rc = SSMR3GetGCPhys(pSSM, &pPtr->paPages[iPage]);
+ }
+ }
+
+            /* A reserved field which will allow the saved data for a command to be extended. */
+ rc = SSMR3GetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+
+ /* See current version above. */
+ if (!fCancelled)
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ else
+ {
+ Log(("vmmdevHGCMLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
+ enmCmdType, GCPhys, cbRequest));
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+ }
+
+        /* A reserved field which will allow the saved data for VMMDevHGCM to be extended. */
+ rc = SSMR3GetU32(pSSM, &u32);
+ AssertRCReturn(rc, rc);
+ }
+ else
+ {
+ /* Ancient. Only the guest physical address is saved. */
+ uint32_t iCmd;
+ for (iCmd = 0; iCmd < cCmds; ++iCmd)
+ {
+ RTGCPHYS GCPhys;
+ uint32_t cbRequest;
+
+ SSMR3GetGCPhys(pSSM, &GCPhys);
+ rc = SSMR3GetU32(pSSM, &cbRequest);
+ AssertRCReturn(rc, rc);
+
+ LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_LOADSTATE, GCPhys, cbRequest, 0, 0 /*fRequestor*/);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ }
+ }
+
+ return rc;
+}
+
+/** Restore HGCM connect command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param u32SSMVersion The saved state version the command has been loaded from.
+ * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevHGCMRestoreConnect(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
+ VMMDevHGCMConnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ RT_NOREF(pThis);
+
+ int rc = VINF_SUCCESS;
+
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (u32SSMVersion >= 9)
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT, VERR_MISMATCH);
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CONNECT, pLoadedCmd->GCPhys, cbReq, 0,
+ pReq->header.header.fRequestor);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ Assert(pLoadedCmd->fCancelled == false);
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ vmmdevHGCMConnectFetch(pReq, pCmd);
+
+ if (RT_SUCCESS(rc))
+ *ppRestoredCmd = pCmd;
+
+ return rc;
+}
+
+/** Restore HGCM disconnect command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param u32SSMVersion The saved state version the command has been loaded from.
+ * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevHGCMRestoreDisconnect(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
+ VMMDevHGCMDisconnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ RT_NOREF(pThis);
+
+ int rc = VINF_SUCCESS;
+
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (u32SSMVersion >= 9)
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT, VERR_MISMATCH);
+
+ PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_DISCONNECT, pLoadedCmd->GCPhys, cbReq, 0,
+ pReq->header.header.fRequestor);
+ AssertReturn(pCmd, VERR_NO_MEMORY);
+
+ Assert(pLoadedCmd->fCancelled == false);
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ vmmdevHGCMDisconnectFetch(pReq, pCmd);
+
+ if (RT_SUCCESS(rc))
+ *ppRestoredCmd = pCmd;
+
+ return rc;
+}
+
+/** Restore HGCM call command loaded from old saved state.
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param u32SSMVersion The saved state version the command has been loaded from.
+ * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
+ * @param pReq The guest request (cached in host memory).
+ * @param cbReq Size of the guest request.
+ * @param enmRequestType Type of the HGCM request.
+ * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
+ */
+static int vmmdevHGCMRestoreCall(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
+ VMMDevHGCMCall *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ int rc = VINF_SUCCESS;
+
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
+ if (u32SSMVersion >= 9)
+ {
+ ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_MISMATCH);
+ Assert(pLoadedCmd->fCancelled == false);
+ }
+
+ PVBOXHGCMCMD pCmd;
+ uint32_t cbHGCMParmStruct;
+ rc = vmmdevHGCMCallAlloc(pThis, pReq, cbReq, pLoadedCmd->GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
+ if (RT_FAILURE(rc))
+ return rc;
+
+    /* pLoadedCmd is incomplete; it does not contain the actual call parameters, only the page lists for LinAddr parameters. */
+ pCmd->fCancelled = false;
+ pCmd->fRestored = true;
+ pCmd->enmRequestType = enmRequestType;
+
+ rc = vmmdevHGCMCallFetchGuestParms(pThis, pCmd, pReq, cbReq, enmRequestType, cbHGCMParmStruct);
+ if (RT_SUCCESS(rc))
+ {
+ /* Update LinAddr parameters from pLoadedCmd.
+ * pLoadedCmd->u.call.cParms is actually the number of LinAddrs, see vmmdevHGCMLoadState.
+ */
+ uint32_t iLinAddr;
+ for (iLinAddr = 0; iLinAddr < pLoadedCmd->u.call.cParms; ++iLinAddr)
+ {
+ VBOXHGCMGUESTPARM * const pLoadedParm = &pLoadedCmd->u.call.paGuestParms[iLinAddr];
+ /* pLoadedParm->cbData is actually index of the LinAddr parameter, see vmmdevHGCMLoadState. */
+ const uint32_t iParm = pLoadedParm->u.ptr.cbData;
+ ASSERT_GUEST_STMT_BREAK(iParm < pCmd->u.call.cParms, rc = VERR_MISMATCH);
+
+ VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[iParm];
+ ASSERT_GUEST_STMT_BREAK( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
+ || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr,
+ rc = VERR_MISMATCH);
+ ASSERT_GUEST_STMT_BREAK( pLoadedParm->u.ptr.offFirstPage == pGuestParm->u.ptr.offFirstPage
+ && pLoadedParm->u.ptr.cPages == pGuestParm->u.ptr.cPages,
+ rc = VERR_MISMATCH);
+ memcpy(pGuestParm->u.ptr.paPages, pLoadedParm->u.ptr.paPages, pGuestParm->u.ptr.cPages * sizeof(RTGCPHYS));
+ }
+ }
+
+ if (RT_SUCCESS(rc))
+ *ppRestoredCmd = pCmd;
+ else
+ vmmdevHGCMCmdFree(pThis, pCmd);
+
+ return rc;
+}
+
+/** Allocate and initialize a HGCM command using the given request (pReqHdr)
+ * and the command loaded from saved state (pLoadedCmd).
+ *
+ * @returns VBox status code that the guest should see.
+ * @param pThis The VMMDev instance data.
+ * @param u32SSMVersion Saved state version.
+ * @param pLoadedCmd HGCM command which needs restoration.
+ * @param pReqHdr The request (cached in host memory).
+ * @param cbReq Size of the entire request (including HGCM parameters).
+ * @param ppRestoredCmd Where to store pointer to restored command.
+ */
+static int vmmdevHGCMRestoreCommand(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
+ const VMMDevHGCMRequestHeader *pReqHdr, uint32_t cbReq,
+ VBOXHGCMCMD **ppRestoredCmd)
+{
+ int rc = VINF_SUCCESS;
+
+ /* Verify the request. */
+ ASSERT_GUEST_RETURN(cbReq >= sizeof(VMMDevHGCMRequestHeader), VERR_MISMATCH);
+ ASSERT_GUEST_RETURN(cbReq == pReqHdr->header.size, VERR_MISMATCH);
+
+ const VMMDevRequestType enmRequestType = pReqHdr->header.requestType;
+ switch (enmRequestType)
+ {
+ case VMMDevReq_HGCMConnect:
+ {
+ VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
+ rc = vmmdevHGCMRestoreConnect(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
+ ppRestoredCmd);
+ break;
+ }
+
+ case VMMDevReq_HGCMDisconnect:
+ {
+ VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
+ rc = vmmdevHGCMRestoreDisconnect(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
+ ppRestoredCmd);
+ break;
+ }
+
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ case VMMDevReq_HGCMCall32:
+ case VMMDevReq_HGCMCall64:
+#else
+ case VMMDevReq_HGCMCall:
+#endif
+ {
+ VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
+ rc = vmmdevHGCMRestoreCall(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
+ ppRestoredCmd);
+ break;
+ }
+
+ default:
+ ASSERT_GUEST_FAILED_RETURN(VERR_MISMATCH);
+ }
+
+ return rc;
+}
+
+/** Resubmit pending HGCM commands which were loaded from saved state.
+ *
+ * @returns VBox status code.
+ * @param pThis The VMMDev instance data.
+ *
+ * @thread EMT
+ */
+int vmmdevHGCMLoadStateDone(PVMMDEV pThis)
+{
+ /*
+ * Resubmit pending HGCM commands to services.
+ *
+     * pThis->listHGCMCmd contains the commands loaded by vmmdevHGCMLoadState.
+ *
+     * Legacy saved states (pre VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
+     * do not contain enough information about the command parameters,
+     * so it is necessary to reload at least some data from the guest
+     * memory in order to reconstruct the commands.
+     *
+     * There are two types of legacy saved states, containing:
+     * 1) only the guest physical address and size of the request;
+     * 2) additionally, page lists for the LinAddr parameters.
+     *
+     * Commands from the most ancient saved states are loaded with
+     * enmCmdType = VBOXHGCMCMDTYPE_LOADSTATE.
+ */
+
+    int rcFunc = VINF_SUCCESS; /* A failure status here makes the whole function fail, i.e. the VM will not start. */
+
+ /* Get local copy of the list of loaded commands. */
+ RTLISTANCHOR listLoadedCommands;
+ RTListMove(&listLoadedCommands, &pThis->listHGCMCmd);
+
+ /* Resubmit commands. */
+ PVBOXHGCMCMD pCmd, pNext;
+ RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+        int rcCmd = VINF_SUCCESS; /* A failure status here makes this HGCM command fail for the guest. */
+
+ RTListNodeRemove(&pCmd->node);
+
+ /*
+ * Re-read the request from the guest memory.
+ * It will be used to:
+ * * reconstruct commands if legacy saved state has been restored;
+ * * report an error to the guest if resubmit failed.
+ */
+ VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
+ AssertBreakStmt(pReqHdr, vmmdevHGCMCmdFree(pThis, pCmd); rcFunc = VERR_NO_MEMORY);
+
+ PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
+ RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+
+ if (pThis->pHGCMDrv)
+ {
+ /*
+ * Reconstruct legacy commands.
+ */
+ if (RT_LIKELY(pThis->u32SSMVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS))
+ { /* likely */ }
+ else
+ {
+ PVBOXHGCMCMD pRestoredCmd = NULL;
+ rcCmd = vmmdevHGCMRestoreCommand(pThis, pThis->u32SSMVersion, pCmd,
+ pReqHdr, pCmd->cbRequest, &pRestoredCmd);
+ if (RT_SUCCESS(rcCmd))
+ {
+ Assert(pCmd != pRestoredCmd); /* vmmdevHGCMRestoreCommand must allocate restored command. */
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ pCmd = pRestoredCmd;
+ }
+ }
+
+ /* Resubmit commands. */
+ if (RT_SUCCESS(rcCmd))
+ {
+ switch (pCmd->enmCmdType)
+ {
+ case VBOXHGCMCMDTYPE_CONNECT:
+ {
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ rcCmd = pThis->pHGCMDrv->pfnConnect(pThis->pHGCMDrv, pCmd, pCmd->u.connect.pLoc,
+ &pCmd->u.connect.u32ClientID);
+ if (RT_FAILURE(rcCmd))
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ break;
+ }
+
+ case VBOXHGCMCMDTYPE_DISCONNECT:
+ {
+ vmmdevHGCMAddCommand(pThis, pCmd);
+ rcCmd = pThis->pHGCMDrv->pfnDisconnect(pThis->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
+ if (RT_FAILURE(rcCmd))
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ break;
+ }
+
+ case VBOXHGCMCMDTYPE_CALL:
+ {
+ rcCmd = vmmdevHGCMInitHostParameters(pThis, pCmd, (uint8_t const *)pReqHdr);
+ if (RT_SUCCESS(rcCmd))
+ {
+ vmmdevHGCMAddCommand(pThis, pCmd);
+
+ /* Pass the function call to HGCM connector for actual processing */
+ uint64_t tsNow;
+ STAM_GET_TS(tsNow);
+ rcCmd = pThis->pHGCMDrv->pfnCall(pThis->pHGCMDrv, pCmd,
+ pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
+ pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsNow);
+ if (RT_FAILURE(rcCmd))
+ {
+ LogFunc(("pfnCall rc = %Rrc\n", rcCmd));
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ }
+ }
+ break;
+ }
+
+ default:
+ AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
+ }
+ }
+ }
+ else
+ AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
+
+ if (RT_SUCCESS(rcCmd))
+ { /* likely */ }
+ else
+ {
+ /* Return the error to the guest. Guest may try to repeat the call. */
+ pReqHdr->result = rcCmd;
+ pReqHdr->header.rc = rcCmd;
+ pReqHdr->fu32Flags |= VBOX_HGCM_REQ_DONE;
+
+ /* Write back only the header. */
+ PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
+
+ VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
+
+ /* Deallocate the command memory. */
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+
+ RTMemFree(pReqHdr);
+ }
+
+ if (RT_FAILURE(rcFunc))
+ {
+ RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+ RTListNodeRemove(&pCmd->node);
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+ }
+
+ return rcFunc;
+}
+
+
+/**
+ * Counterpart to vmmdevHGCMInit().
+ *
+ * @param pThis The VMMDev instance data.
+ */
+void vmmdevHGCMDestroy(PVMMDEV pThis)
+{
+ LogFlowFunc(("\n"));
+
+ if (RTCritSectIsInitialized(&pThis->critsectHGCMCmdList))
+ {
+ PVBOXHGCMCMD pCmd, pNext;
+ RTListForEachSafe(&pThis->listHGCMCmd, pCmd, pNext, VBOXHGCMCMD, node)
+ {
+ vmmdevHGCMRemoveCommand(pThis, pCmd);
+ vmmdevHGCMCmdFree(pThis, pCmd);
+ }
+
+ RTCritSectDelete(&pThis->critsectHGCMCmdList);
+ }
+
+ AssertCompile((uintptr_t)NIL_RTMEMCACHE == 0);
+ if (pThis->hHgcmCmdCache != NIL_RTMEMCACHE)
+ {
+ RTMemCacheDestroy(pThis->hHgcmCmdCache);
+ pThis->hHgcmCmdCache = NIL_RTMEMCACHE;
+ }
+}
+
+
+/**
+ * Initializes the HGCM specific state.
+ *
+ * Keeps VBOXHGCMCMDCACHED and friends local.
+ *
+ * @returns VBox status code.
+ * @param pThis The VMMDev instance data.
+ */
+int vmmdevHGCMInit(PVMMDEV pThis)
+{
+ LogFlowFunc(("\n"));
+
+ RTListInit(&pThis->listHGCMCmd);
+
+ int rc = RTCritSectInit(&pThis->critsectHGCMCmdList);
+ AssertLogRelRCReturn(rc, rc);
+
+ rc = RTMemCacheCreate(&pThis->hHgcmCmdCache, sizeof(VBOXHGCMCMDCACHED), 64, _1M, NULL, NULL, NULL, 0);
+ AssertLogRelRCReturn(rc, rc);
+
+ pThis->u32HGCMEnabled = 0;
+
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevHGCM.h b/src/VBox/Devices/VMMDev/VMMDevHGCM.h
new file mode 100644
index 00000000..b1f2a390
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevHGCM.h
@@ -0,0 +1,49 @@
+/* $Id: VMMDevHGCM.h $ */
+/** @file
+ * VBoxDev - HGCM - Host-Guest Communication Manager, internal header.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "VMMDevState.h"
+
+RT_C_DECLS_BEGIN
+int vmmdevHGCMConnect(VMMDevState *pVMMDevState, const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPtr);
+int vmmdevHGCMDisconnect(VMMDevState *pVMMDevState, const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPtr);
+int vmmdevHGCMCall(VMMDevState *pVMMDevState, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPtr,
+ VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock);
+int vmmdevHGCMCancel(VMMDevState *pVMMDevState, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPtr);
+int vmmdevHGCMCancel2(VMMDevState *pVMMDevState, RTGCPHYS GCPtr);
+
+DECLCALLBACK(int) hgcmCompleted(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmdPtr);
+DECLCALLBACK(bool) hgcmIsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(bool) hgcmIsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(uint32_t) hgcmGetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd);
+DECLCALLBACK(uint64_t) hgcmGetVMMDevSessionId(PPDMIHGCMPORT pInterface);
+
+int vmmdevHGCMSaveState(VMMDevState *pVMMDevState, PSSMHANDLE pSSM);
+int vmmdevHGCMLoadState(VMMDevState *pVMMDevState, PSSMHANDLE pSSM, uint32_t u32Version);
+int vmmdevHGCMLoadStateDone(VMMDevState *pVMMDevState);
+
+void vmmdevHGCMDestroy(PVMMDEV pThis);
+int vmmdevHGCMInit(PVMMDEV pThis);
+RT_C_DECLS_END
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevHGCM_h */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevState.h b/src/VBox/Devices/VMMDev/VMMDevState.h
new file mode 100644
index 00000000..e2febd0c
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevState.h
@@ -0,0 +1,456 @@
+/* $Id: VMMDevState.h $ */
+/** @file
+ * VMMDev - Guest <-> VMM/Host communication device, internal header.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBoxVideo.h> /* For VBVA definitions. */
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/pdmdev.h>
+#include <VBox/vmm/pdmifs.h>
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+# include <iprt/test.h>
+# include <VBox/VMMDevTesting.h>
+#endif
+
+#include <iprt/list.h>
+#include <iprt/memcache.h>
+
+
+#define VMMDEV_WITH_ALT_TIMESYNC
+
+/** Request locking structure (HGCM optimization). */
+typedef struct VMMDEVREQLOCK
+{
+ void *pvReq;
+ PGMPAGEMAPLOCK Lock;
+} VMMDEVREQLOCK;
+/** Pointer to a request lock structure. */
+typedef VMMDEVREQLOCK *PVMMDEVREQLOCK;
+
+typedef struct DISPLAYCHANGEREQUEST
+{
+ bool fPending;
+ bool afAlignment[3];
+ VMMDevDisplayDef displayChangeRequest;
+ VMMDevDisplayDef lastReadDisplayChangeRequest;
+} DISPLAYCHANGEREQUEST;
+
+typedef struct DISPLAYCHANGEDATA
+{
+ /* Which monitor is being reported to the guest. */
+ int32_t iCurrentMonitor;
+
+ /** true if the guest responded to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST at least once */
+ bool fGuestSentChangeEventAck;
+ bool afAlignment[3];
+
+ DISPLAYCHANGEREQUEST aRequests[VBOX_VIDEO_MAX_SCREENS];
+} DISPLAYCHANGEDATA;
+
+
+/**
+ * Credentials for automatic guest logon and host configured logon (?).
+ *
+ * This is not stored in the same block as the instance data in order to make it
+ * harder to access.
+ */
+typedef struct VMMDEVCREDS
+{
+ /** credentials for guest logon purposes */
+ struct
+ {
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+ bool fAllowInteractiveLogon;
+ } Logon;
+
+ /** credentials for verification by guest */
+ struct
+ {
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+ } Judge;
+} VMMDEVCREDS;
+
+
+/**
+ * Facility status entry.
+ */
+typedef struct VMMDEVFACILITYSTATUSENTRY
+{
+ /** The facility (may contain values other than the defined ones). */
+ VBoxGuestFacilityType enmFacility;
+ /** The status (may contain values other than the defined ones). */
+ VBoxGuestFacilityStatus enmStatus;
+ /** Whether this entry is fixed and cannot be reused when inactive. */
+ bool fFixed;
+ /** Explicit alignment padding / reserved for future use. MBZ. */
+ bool afPadding[3];
+ /** The facility flags (yet to be defined). */
+ uint32_t fFlags;
+ /** Last update timestamp. */
+ RTTIMESPEC TimeSpecTS;
+} VMMDEVFACILITYSTATUSENTRY;
+/** Pointer to a facility status entry. */
+typedef VMMDEVFACILITYSTATUSENTRY *PVMMDEVFACILITYSTATUSENTRY;
+
+
+/**
+ * State structure for the VMM device.
+ */
+typedef struct VMMDevState
+{
+ /** The PCI device structure. */
+ PDMPCIDEV PciDev;
+ /** The critical section for this device.
+     * @remarks We use this rather than the default one; it's simpler with all
+ * the driver interfaces where we have to waste time digging out the
+ * PDMDEVINS structure. */
+ PDMCRITSECT CritSect;
+
+ /** hypervisor address space size */
+ uint32_t hypervisorSize;
+
+ /** mouse capabilities of host and guest */
+ uint32_t mouseCapabilities;
+ /** absolute mouse position in pixels */
+ int32_t mouseXAbs;
+ int32_t mouseYAbs;
+ /** Does the guest currently want the host pointer to be shown? */
+ uint32_t fHostCursorRequested;
+
+//#if HC_ARCH_BITS == 32
+// /** Alignment padding. */
+// uint32_t u32Alignment0;
+//#endif
+
+ /** Pointer to device instance - RC pointer. */
+ PPDMDEVINSRC pDevInsRC;
+    /** Pointer to device instance - R3 pointer. */
+ PPDMDEVINSR3 pDevInsR3;
+ /** Pointer to device instance - R0 pointer. */
+ PPDMDEVINSR0 pDevInsR0;
+
+ /** LUN\#0 + Status: VMMDev port base interface. */
+ PDMIBASE IBase;
+ /** LUN\#0: VMMDev port interface. */
+ PDMIVMMDEVPORT IPort;
+#ifdef VBOX_WITH_HGCM
+ /** LUN\#0: HGCM port interface. */
+ PDMIHGCMPORT IHGCMPort;
+//# if HC_ARCH_BITS == 32
+// RTR3PTR R3PtrAlignment1;
+//# endif
+#endif
+ /** Pointer to base interface of the driver. */
+ R3PTRTYPE(PPDMIBASE) pDrvBase;
+ /** VMMDev connector interface */
+ R3PTRTYPE(PPDMIVMMDEVCONNECTOR) pDrv;
+#ifdef VBOX_WITH_HGCM
+ /** HGCM connector interface */
+ R3PTRTYPE(PPDMIHGCMCONNECTOR) pHGCMDrv;
+#endif
+ /** message buffer for backdoor logging. */
+ char szMsg[512];
+ /** message buffer index. */
+ uint32_t iMsg;
+ /** Alignment padding. */
+ uint32_t u32Alignment2;
+
+ /** Statistics counter for slow IRQ ACK. */
+ STAMCOUNTER StatSlowIrqAck;
+ /** Statistics counter for fast IRQ ACK - R3. */
+ STAMCOUNTER StatFastIrqAckR3;
+ /** Statistics counter for fast IRQ ACK - R0 / RC. */
+ STAMCOUNTER StatFastIrqAckRZ;
+ /** IRQ number assigned to the device */
+ uint32_t irq;
+ /** Current host side event flags */
+ uint32_t u32HostEventFlags;
+ /** Mask of events guest is interested in.
+ * @note The HGCM events are enabled automatically by the VMMDev device when
+ * guest issues HGCM commands. */
+ uint32_t u32GuestFilterMask;
+ /** Delayed mask of guest events */
+ uint32_t u32NewGuestFilterMask;
+ /** Flag whether u32NewGuestFilterMask is valid */
+ bool fNewGuestFilterMask;
+ /** Alignment padding. */
+ bool afAlignment3[3];
+
+ /** GC physical address of VMMDev RAM area */
+ RTGCPHYS32 GCPhysVMMDevRAM;
+ /** R3 pointer to VMMDev RAM area */
+ R3PTRTYPE(VMMDevMemory *) pVMMDevRAMR3;
+ /** R0 pointer to VMMDev RAM area - first page only, could be NULL! */
+ R0PTRTYPE(VMMDevMemory *) pVMMDevRAMR0;
+    /** RC pointer to VMMDev RAM area - first page only, could be NULL! */
+ RCPTRTYPE(VMMDevMemory *) pVMMDevRAMRC;
+#if HC_ARCH_BITS != 32
+ RTRCPTR RCPtrAlignment3b;
+#endif
+
+ /** R3 pointer to VMMDev Heap RAM area. */
+ R3PTRTYPE(VMMDevMemory *) pVMMDevHeapR3;
+ /** GC physical address of VMMDev Heap RAM area */
+ RTGCPHYS32 GCPhysVMMDevHeap;
+
+ /** Information reported by guest via VMMDevReportGuestInfo generic request.
+ * Until this information is reported the VMMDev refuses any other requests.
+ */
+ VBoxGuestInfo guestInfo;
+ /** Information report \#2, chewed a little. */
+ struct
+ {
+ uint32_t uFullVersion; /**< non-zero if info is present. */
+ uint32_t uRevision;
+ uint32_t fFeatures;
+ char szName[128];
+ } guestInfo2;
+
+ /** Array of guest facility statuses. */
+ VMMDEVFACILITYSTATUSENTRY aFacilityStatuses[32];
+ /** The number of valid entries in the facility status array. */
+ uint32_t cFacilityStatuses;
+
+ /** Information reported by guest via VMMDevReportGuestCapabilities. */
+ uint32_t guestCaps;
+
+ /** "Additions are Ok" indicator, set to true after processing VMMDevReportGuestInfo,
+     * if the additions version is compatible. This flag avoids repeatedly comparing
+     * the version in guestInfo.
+ */
+ uint32_t fu32AdditionsOk;
+
+ /** Video acceleration status set by guest. */
+ uint32_t u32VideoAccelEnabled;
+
+ DISPLAYCHANGEDATA displayChangeData;
+
+ /** Pointer to the credentials. */
+ R3PTRTYPE(VMMDEVCREDS *) pCredentials;
+
+#if HC_ARCH_BITS == 32
+ uint32_t uAlignment4;
+#endif
+
+ /* memory balloon change request */
+ uint32_t cMbMemoryBalloon;
+ /** The last balloon size queried by the guest additions. */
+ uint32_t cMbMemoryBalloonLast;
+
+ /* guest ram size */
+ uint64_t cbGuestRAM;
+
+ /* unique session id; the id will be different after each start, reset or restore of the VM. */
+ uint64_t idSession;
+
+ /* statistics interval change request */
+ uint32_t u32StatIntervalSize, u32LastStatIntervalSize;
+
+ /* seamless mode change request */
+ bool fLastSeamlessEnabled, fSeamlessEnabled;
+ bool afAlignment5[1];
+
+ bool fVRDPEnabled;
+ uint32_t uVRDPExperienceLevel;
+
+#ifdef VMMDEV_WITH_ALT_TIMESYNC
+ uint64_t hostTime;
+ bool fTimesyncBackdoorLo;
+ bool afAlignment6[2];
+#else
+ bool afAlignment6[1+2];
+#endif
+ /** Set if GetHostTime should fail.
+ * Loaded from the GetHostTimeDisabled configuration value. */
+ bool fGetHostTimeDisabled;
+
+ /** Set if backdoor logging should be disabled (output will be ignored then) */
+ bool fBackdoorLogDisabled;
+
+ /** Don't clear credentials */
+ bool fKeepCredentials;
+
+ /** Heap enabled. */
+ bool fHeapEnabled;
+
+ /** Guest Core Dumping enabled. */
+ bool fGuestCoreDumpEnabled;
+
+ /** Guest Core Dump location. */
+ char szGuestCoreDumpDir[RTPATH_MAX];
+
+ /** Number of additional cores to keep around. */
+ uint32_t cGuestCoreDumps;
+
+#ifdef VBOX_WITH_HGCM
+ /** List of pending HGCM requests (VBOXHGCMCMD). */
+ RTLISTANCHORR3 listHGCMCmd;
+ /** Critical section to protect the list. */
+ RTCRITSECT critsectHGCMCmdList;
+ /** Whether the HGCM events are already automatically enabled. */
+ uint32_t u32HGCMEnabled;
+ /** Saved state version of restored commands. */
+ uint32_t u32SSMVersion;
+ RTMEMCACHE hHgcmCmdCache;
+ STAMPROFILE StatHgcmCmdArrival;
+ STAMPROFILE StatHgcmCmdCompletion;
+ STAMPROFILE StatHgcmCmdTotal;
+ STAMCOUNTER StatHgcmLargeCmdAllocs;
+#endif /* VBOX_WITH_HGCM */
+ STAMCOUNTER StatReqBufAllocs;
+
+    /** Per-CPU 4K request buffers, allocated as needed. */
+ R3PTRTYPE(VMMDevRequestHeader *) apReqBufs[VMM_MAX_CPU_COUNT];
+
+ /** Status LUN: Shared folders LED */
+ struct
+ {
+ /** The LED. */
+ PDMLED Led;
+ /** The LED ports. */
+ PDMILEDPORTS ILeds;
+ /** Partner of ILeds. */
+ R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
+ } SharedFolders;
+
+ /** FLag whether CPU hotplug events are monitored */
+    /** Flag whether CPU hotplug events are monitored */
+ /** Alignment padding. */
+ bool afPadding8[3];
+ /** CPU hotplug event */
+ VMMDevCpuEventType enmCpuHotPlugEvent;
+ /** Core id of the CPU to change */
+ uint32_t idCpuCore;
+ /** Package id of the CPU to change */
+ uint32_t idCpuPackage;
+
+ uint32_t StatMemBalloonChunks;
+
+ /** Set if RC/R0 is enabled. */
+ bool fRZEnabled;
+ /** Set if testing is enabled. */
+ bool fTestingEnabled;
+ /** Set if testing the MMIO testing range is enabled. */
+ bool fTestingMMIO;
+ /** Alignment padding. */
+ bool afPadding9[HC_ARCH_BITS == 32 ? 1 : 5];
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+ /** The high timestamp value. */
+ uint32_t u32TestingHighTimestamp;
+ /** The current testing command (VMMDEV_TESTING_CMD_XXX). */
+ uint32_t u32TestingCmd;
+ /** The testing data offset (command specific). */
+ uint32_t offTestingData;
+    /** For buffering what comes in over the testing data port. */
+ union
+ {
+ char padding[1024];
+
+ /** VMMDEV_TESTING_CMD_INIT, VMMDEV_TESTING_CMD_SUB_NEW,
+ * VMMDEV_TESTING_CMD_FAILED. */
+ struct
+ {
+ char sz[1024];
+ } String, Init, SubNew, Failed;
+
+ /** VMMDEV_TESTING_CMD_TERM, VMMDEV_TESTING_CMD_SUB_DONE. */
+ struct
+ {
+ uint32_t c;
+ } Error, Term, SubDone;
+
+ /** VMMDEV_TESTING_CMD_VALUE. */
+ struct
+ {
+ RTUINT64U u64Value;
+ uint32_t u32Unit;
+ char szName[1024 - 8 - 4];
+ } Value;
+
+ /** The read back register (VMMDEV_TESTING_MMIO_OFF_READBACK,
+ * VMMDEV_TESTING_MMIO_OFF_READBACK_R3). */
+ uint8_t abReadBack[VMMDEV_TESTING_READBACK_SIZE];
+ } TestingData;
+ /** The XML output file name (can be a named pipe, doesn't matter to us). */
+ R3PTRTYPE(char *) pszTestingXmlOutput;
+ /** Testing instance for dealing with the output. */
+ RTTEST hTestingTest;
+#endif /* !VBOX_WITHOUT_TESTING_FEATURES */
+
+ /** @name Heartbeat
+ * @{ */
+    /** Timestamp of the last heartbeat from the guest, in nanoseconds. */
+ uint64_t volatile nsLastHeartbeatTS;
+ /** Indicates whether we missed HB from guest on last check. */
+ bool volatile fFlatlined;
+ /** Indicates whether heartbeat check is active. */
+ bool volatile fHeartbeatActive;
+ /** Alignment padding. */
+ bool afAlignment8[6];
+ /** Guest heartbeat interval in nanoseconds.
+ * This is the interval the guest is told to produce heartbeats at. */
+ uint64_t cNsHeartbeatInterval;
+ /** The amount of time without a heartbeat (nanoseconds) before we
+ * conclude the guest is doing a Dixie Flatline (Neuromancer) impression. */
+ uint64_t cNsHeartbeatTimeout;
+ /** Timer for signalling a flatlined guest. */
+ PTMTIMERR3 pFlatlinedTimer;
+ /** @} */
+} VMMDevState;
+typedef VMMDevState VMMDEV;
+/** Pointer to the VMM device state. */
+typedef VMMDEV *PVMMDEV;
+AssertCompileMemberAlignment(VMMDEV, CritSect, 8);
+AssertCompileMemberAlignment(VMMDEV, StatSlowIrqAck, 8);
+AssertCompileMemberAlignment(VMMDEV, cbGuestRAM, 8);
+AssertCompileMemberAlignment(VMMDEV, enmCpuHotPlugEvent, 4);
+AssertCompileMemberAlignment(VMMDEV, aFacilityStatuses, 8);
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+AssertCompileMemberAlignment(VMMDEV, TestingData.Value.u64Value, 8);
+#endif
+
+
+void VMMDevNotifyGuest(VMMDEV *pVMMDevState, uint32_t u32EventMask);
+void VMMDevCtlSetGuestFilterMask(VMMDEV *pVMMDevState, uint32_t u32OrMask, uint32_t u32NotMask);
+
+
+/** The saved state version. */
+#define VMMDEV_SAVED_STATE_VERSION VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS
+/** Updated HGCM commands. */
+#define VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS 17
+/** The saved state version with heartbeat state. */
+#define VMMDEV_SAVED_STATE_VERSION_HEARTBEAT 16
+/** The saved state version without heartbeat state. */
+#define VMMDEV_SAVED_STATE_VERSION_NO_HEARTBEAT 15
+/** The saved state version which is missing the guest facility statuses. */
+#define VMMDEV_SAVED_STATE_VERSION_MISSING_FACILITY_STATUSES 14
+/** The saved state version which is missing the guestInfo2 bits. */
+#define VMMDEV_SAVED_STATE_VERSION_MISSING_GUEST_INFO_2 13
+/** The saved state version used by VirtualBox 3.0.
+ * This doesn't have the config part. */
+#define VMMDEV_SAVED_STATE_VERSION_VBOX_30 11
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevState_h */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevTesting.cpp b/src/VBox/Devices/VMMDev/VMMDevTesting.cpp
new file mode 100644
index 00000000..138536fe
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevTesting.cpp
@@ -0,0 +1,776 @@
+/* $Id: VMMDevTesting.cpp $ */
+/** @file
+ * VMMDev - Testing Extensions.
+ *
+ * To enable: VBoxManage setextradata vmname VBoxInternal/Devices/VMMDev/0/Config/TestingEnabled 1
+ */
+
+/*
+ * Copyright (C) 2010-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_DEV_VMM
+#include <VBox/VMMDev.h>
+#include <VBox/vmm/vmapi.h>
+#include <VBox/log.h>
+#include <VBox/err.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#include <iprt/test.h>
+
+#include "VMMDevState.h"
+#include "VMMDevTesting.h"
+
+
+#ifndef VBOX_WITHOUT_TESTING_FEATURES
+
+#define VMMDEV_TESTING_OUTPUT(a) \
+ do \
+ { \
+ LogAlways(a);\
+ LogRel(a);\
+ } while (0)
+
+/**
+ * @callback_method_impl{FNIOMMMIOWRITE}
+ */
+PDMBOTHCBDECL(int) vmmdevTestingMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
+{
+ RT_NOREF_PV(pvUser);
+
+ switch (GCPhysAddr)
+ {
+ case VMMDEV_TESTING_MMIO_NOP_R3:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+ case VMMDEV_TESTING_MMIO_NOP:
+ return VINF_SUCCESS;
+
+ default:
+ {
+ /*
+ * Readback register (64 bytes wide).
+ */
+ uint32_t off = GCPhysAddr - VMMDEV_TESTING_MMIO_BASE;
+ if ( ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK + VMMDEV_TESTING_READBACK_SIZE)
+#ifndef IN_RING3
+ || ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + VMMDEV_TESTING_READBACK_SIZE)
+#endif
+ )
+ {
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ off &= VMMDEV_TESTING_READBACK_SIZE - 1;
+ switch (cb)
+ {
+ case 8: *(uint64_t *)&pThis->TestingData.abReadBack[off] = *(uint64_t const *)pv; break;
+ case 4: *(uint32_t *)&pThis->TestingData.abReadBack[off] = *(uint32_t const *)pv; break;
+ case 2: *(uint16_t *)&pThis->TestingData.abReadBack[off] = *(uint16_t const *)pv; break;
+ case 1: *(uint8_t *)&pThis->TestingData.abReadBack[off] = *(uint8_t const *)pv; break;
+ default: memcpy(&pThis->TestingData.abReadBack[off], pv, cb); break;
+ }
+ return VINF_SUCCESS;
+ }
+#ifndef IN_RING3
+ if ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+
+ break;
+ }
+
+ /*
+ * Odd NOP accesses.
+ */
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 1:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 2:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 3:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 4:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 5:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 6:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 7:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_WRITE;
+#endif
+ case VMMDEV_TESTING_MMIO_NOP + 1:
+ case VMMDEV_TESTING_MMIO_NOP + 2:
+ case VMMDEV_TESTING_MMIO_NOP + 3:
+ case VMMDEV_TESTING_MMIO_NOP + 4:
+ case VMMDEV_TESTING_MMIO_NOP + 5:
+ case VMMDEV_TESTING_MMIO_NOP + 6:
+ case VMMDEV_TESTING_MMIO_NOP + 7:
+ return VINF_SUCCESS;
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMMMIOREAD}
+ */
+PDMBOTHCBDECL(int) vmmdevTestingMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
+{
+ RT_NOREF_PV(pvUser);
+
+ switch (GCPhysAddr)
+ {
+ case VMMDEV_TESTING_MMIO_NOP_R3:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ /* fall thru. */
+ case VMMDEV_TESTING_MMIO_NOP:
+ switch (cb)
+ {
+ case 8:
+ *(uint64_t *)pv = VMMDEV_TESTING_NOP_RET | ((uint64_t)VMMDEV_TESTING_NOP_RET << 32);
+ break;
+ case 4:
+ *(uint32_t *)pv = VMMDEV_TESTING_NOP_RET;
+ break;
+ case 2:
+ *(uint16_t *)pv = RT_LO_U16(VMMDEV_TESTING_NOP_RET);
+ break;
+ case 1:
+ *(uint8_t *)pv = (uint8_t)(VMMDEV_TESTING_NOP_RET & UINT8_MAX);
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_5;
+ }
+ return VINF_SUCCESS;
+
+
+ default:
+ {
+ /*
+ * Readback register (64 bytes wide).
+ */
+ uint32_t off = GCPhysAddr - VMMDEV_TESTING_MMIO_BASE;
+ if ( ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK + 64)
+#ifndef IN_RING3
+ || ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+#endif
+ )
+ {
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ off &= 0x3f;
+ switch (cb)
+ {
+ case 8: *(uint64_t *)pv = *(uint64_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 4: *(uint32_t *)pv = *(uint32_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 2: *(uint16_t *)pv = *(uint16_t const *)&pThis->TestingData.abReadBack[off]; break;
+ case 1: *(uint8_t *)pv = *(uint8_t const *)&pThis->TestingData.abReadBack[off]; break;
+ default: memcpy(pv, &pThis->TestingData.abReadBack[off], cb); break;
+ }
+ return VINF_SUCCESS;
+ }
+#ifndef IN_RING3
+ if ( off >= VMMDEV_TESTING_MMIO_OFF_READBACK_R3
+ && off + cb <= VMMDEV_TESTING_MMIO_OFF_READBACK_R3 + 64)
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ break;
+ }
+
+ /*
+ * Odd NOP accesses (for 16-bit code mainly).
+ */
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 1:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 2:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 3:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 4:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 5:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 6:
+ case VMMDEV_TESTING_MMIO_NOP_R3 + 7:
+#ifndef IN_RING3
+ return VINF_IOM_R3_MMIO_READ;
+#endif
+ case VMMDEV_TESTING_MMIO_NOP + 1:
+ case VMMDEV_TESTING_MMIO_NOP + 2:
+ case VMMDEV_TESTING_MMIO_NOP + 3:
+ case VMMDEV_TESTING_MMIO_NOP + 4:
+ case VMMDEV_TESTING_MMIO_NOP + 5:
+ case VMMDEV_TESTING_MMIO_NOP + 6:
+ case VMMDEV_TESTING_MMIO_NOP + 7:
+ {
+ static uint8_t const s_abNopValue[8] =
+ {
+ VMMDEV_TESTING_NOP_RET & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 8) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 16) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 24) & 0xff,
+ VMMDEV_TESTING_NOP_RET & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 8) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 16) & 0xff,
+ (VMMDEV_TESTING_NOP_RET >> 24) & 0xff,
+ };
+
+ memset(pv, 0xff, cb);
+ memcpy(pv, &s_abNopValue[GCPhysAddr & 7], RT_MIN(8 - (GCPhysAddr & 7), cb));
+ return VINF_SUCCESS;
+ }
+ }
+
+ return VINF_IOM_MMIO_UNUSED_FF;
+}
+
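+/*
+ * Illustrative guest-side sketch (not part of the device code): exercising the
+ * readback register handled above.  It assumes the guest has the testing MMIO
+ * range (VMMDEV_TESTING_MMIO_BASE) mapped at a hypothetical virtual address
+ * pbMmio:
+ *
+ *     volatile uint8_t *pbReadBack = pbMmio + VMMDEV_TESTING_MMIO_OFF_READBACK;
+ *     *(volatile uint64_t *)pbReadBack = UINT64_C(0x1122334455667788);  // stored by the write handler
+ *     uint64_t const uReadBack = *(volatile uint64_t *)pbReadBack;      // the same value comes back
+ *
+ * Accessing the range at VMMDEV_TESTING_MMIO_OFF_READBACK_R3 instead forces
+ * the access to be handled in ring-3, which is what that offset exists for.
+ */
+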
+#ifdef IN_RING3
+
+/**
+ * Executes the VMMDEV_TESTING_CMD_VALUE_REG command when the data is ready.
+ *
+ * @param pDevIns The PDM device instance.
+ * @param   pThis               The VMMDev instance data.
+ */
+static void vmmdevTestingCmdExec_ValueReg(PPDMDEVINS pDevIns, VMMDevState *pThis)
+{
+ char *pszRegNm = strchr(pThis->TestingData.String.sz, ':');
+ if (pszRegNm)
+ {
+ *pszRegNm++ = '\0';
+ pszRegNm = RTStrStrip(pszRegNm);
+ }
+ char *pszValueNm = RTStrStrip(pThis->TestingData.String.sz);
+ size_t const cchValueNm = strlen(pszValueNm);
+ if (cchValueNm && pszRegNm && *pszRegNm)
+ {
+ PUVM pUVM = PDMDevHlpGetUVM(pDevIns);
+ PVM pVM = PDMDevHlpGetVM(pDevIns);
+ VMCPUID idCpu = VMMGetCpuId(pVM);
+ uint64_t u64Value;
+ int rc2 = DBGFR3RegNmQueryU64(pUVM, idCpu, pszRegNm, &u64Value);
+ if (RT_SUCCESS(rc2))
+ {
+ const char *pszWarn = rc2 == VINF_DBGF_TRUNCATED_REGISTER ? " truncated" : "";
+#if 1 /*!RTTestValue format*/
+ char szFormat[128], szValue[128];
+ RTStrPrintf(szFormat, sizeof(szFormat), "%%VR{%s}", pszRegNm);
+ rc2 = DBGFR3RegPrintf(pUVM, idCpu, szValue, sizeof(szValue), szFormat);
+ if (RT_SUCCESS(rc2))
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %16s {reg=%s}%s\n",
+ pszValueNm,
+ (ssize_t)cchValueNm - 12 > 48 ? 0 : 48 - ((ssize_t)cchValueNm - 12), "",
+ szValue, pszRegNm, pszWarn));
+ else
+#endif
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %'9llu (%#llx) [0] {reg=%s}%s\n",
+ pszValueNm,
+ (ssize_t)cchValueNm - 12 > 48 ? 0 : 48 - ((ssize_t)cchValueNm - 12), "",
+ u64Value, u64Value, pszRegNm, pszWarn));
+ }
+ else
+ VMMDEV_TESTING_OUTPUT(("testing: error querying register '%s' for value '%s': %Rrc\n",
+ pszRegNm, pszValueNm, rc2));
+ }
+ else
+ VMMDEV_TESTING_OUTPUT(("testing: malformed register value '%s'/'%s'\n", pszValueNm, pszRegNm));
+}
+
+#endif /* IN_RING3 */
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT}
+ */
+PDMBOTHCBDECL(int) vmmdevTestingIoWrite(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
+{
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ RT_NOREF_PV(pvUser);
+
+ switch (uPort)
+ {
+ /*
+ * The NOP I/O ports are used for performance measurements.
+ */
+ case VMMDEV_TESTING_IOPORT_NOP:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+ return VINF_SUCCESS;
+
+ case VMMDEV_TESTING_IOPORT_NOP_R3:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+#ifndef IN_RING3
+ return VINF_IOM_R3_IOPORT_WRITE;
+#else
+ return VINF_SUCCESS;
+#endif
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ /* The timestamp I/O ports are read-only. */
+ case VMMDEV_TESTING_IOPORT_TS_LOW:
+ case VMMDEV_TESTING_IOPORT_TS_HIGH:
+ break;
+
+ /*
+ * The command port (DWORD and WORD write only).
+ * (We have to allow WORD writes for 286, 186 and 8086 execution modes.)
+ */
+ case VMMDEV_TESTING_IOPORT_CMD:
+ if (cb == 2)
+ {
+ u32 |= VMMDEV_TESTING_CMD_MAGIC_HI_WORD;
+ cb = 4;
+ }
+ if (cb == 4)
+ {
+ pThis->u32TestingCmd = u32;
+ pThis->offTestingData = 0;
+ RT_ZERO(pThis->TestingData);
+ return VINF_SUCCESS;
+ }
+ break;
+
+ /*
+         * The data port. Used for providing data for a command.
+ */
+ case VMMDEV_TESTING_IOPORT_DATA:
+ {
+ uint32_t uCmd = pThis->u32TestingCmd;
+ uint32_t off = pThis->offTestingData;
+ switch (uCmd)
+ {
+ case VMMDEV_TESTING_CMD_INIT:
+ case VMMDEV_TESTING_CMD_SUB_NEW:
+ case VMMDEV_TESTING_CMD_FAILED:
+ case VMMDEV_TESTING_CMD_SKIPPED:
+ case VMMDEV_TESTING_CMD_PRINT:
+ if ( off < sizeof(pThis->TestingData.String.sz) - 1
+ && cb == 1)
+ {
+ if (u32)
+ {
+ pThis->TestingData.String.sz[off] = u32;
+ pThis->offTestingData = off + 1;
+ }
+ else
+ {
+#ifdef IN_RING3
+ pThis->TestingData.String.sz[off] = '\0';
+ switch (uCmd)
+ {
+ case VMMDEV_TESTING_CMD_INIT:
+ VMMDEV_TESTING_OUTPUT(("testing: INIT '%s'\n", pThis->TestingData.String.sz));
+ if (pThis->hTestingTest != NIL_RTTEST)
+ {
+ RTTestChangeName(pThis->hTestingTest, pThis->TestingData.String.sz);
+ RTTestBanner(pThis->hTestingTest);
+ }
+ break;
+ case VMMDEV_TESTING_CMD_SUB_NEW:
+ VMMDEV_TESTING_OUTPUT(("testing: SUB_NEW '%s'\n", pThis->TestingData.String.sz));
+ if (pThis->hTestingTest != NIL_RTTEST)
+ RTTestSub(pThis->hTestingTest, pThis->TestingData.String.sz);
+ break;
+ case VMMDEV_TESTING_CMD_FAILED:
+ if (pThis->hTestingTest != NIL_RTTEST)
+ RTTestFailed(pThis->hTestingTest, "%s", pThis->TestingData.String.sz);
+ VMMDEV_TESTING_OUTPUT(("testing: FAILED '%s'\n", pThis->TestingData.String.sz));
+ break;
+ case VMMDEV_TESTING_CMD_SKIPPED:
+ if (pThis->hTestingTest != NIL_RTTEST)
+ {
+ if (off)
+ RTTestSkipped(pThis->hTestingTest, "%s", pThis->TestingData.String.sz);
+ else
+ RTTestSkipped(pThis->hTestingTest, NULL);
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: SKIPPED '%s'\n", pThis->TestingData.String.sz));
+ break;
+ case VMMDEV_TESTING_CMD_PRINT:
+ if (pThis->hTestingTest != NIL_RTTEST && off)
+ RTTestPrintf(pThis->hTestingTest, RTTESTLVL_ALWAYS, "%s", pThis->TestingData.String.sz);
+ VMMDEV_TESTING_OUTPUT(("testing: '%s'\n", pThis->TestingData.String.sz));
+ break;
+ }
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case VMMDEV_TESTING_CMD_TERM:
+ case VMMDEV_TESTING_CMD_SUB_DONE:
+ if (cb == 2)
+ {
+ if (off == 0)
+ {
+ pThis->TestingData.Error.c = u32;
+ pThis->offTestingData = 2;
+ break;
+ }
+ if (off == 2)
+ {
+ u32 <<= 16;
+ u32 |= pThis->TestingData.Error.c & UINT16_MAX;
+ cb = 4;
+ off = 0;
+ }
+ else
+ break;
+ }
+
+ if ( off == 0
+ && cb == 4)
+ {
+#ifdef IN_RING3
+ pThis->TestingData.Error.c = u32;
+ if (uCmd == VMMDEV_TESTING_CMD_TERM)
+ {
+ if (pThis->hTestingTest != NIL_RTTEST)
+ {
+ while (RTTestErrorCount(pThis->hTestingTest) < u32)
+ RTTestErrorInc(pThis->hTestingTest); /* A bit stupid, but does the trick. */
+ RTTestSubDone(pThis->hTestingTest);
+ RTTestSummaryAndDestroy(pThis->hTestingTest);
+ pThis->hTestingTest = NIL_RTTEST;
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: TERM - %u errors\n", u32));
+ }
+ else
+ {
+ if (pThis->hTestingTest != NIL_RTTEST)
+ {
+ while (RTTestSubErrorCount(pThis->hTestingTest) < u32)
+ RTTestErrorInc(pThis->hTestingTest); /* A bit stupid, but does the trick. */
+ RTTestSubDone(pThis->hTestingTest);
+ }
+ VMMDEV_TESTING_OUTPUT(("testing: SUB_DONE - %u errors\n", u32));
+ }
+ return VINF_SUCCESS;
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ break;
+
+ case VMMDEV_TESTING_CMD_VALUE:
+ if (cb == 4)
+ {
+ if (off == 0)
+ pThis->TestingData.Value.u64Value.s.Lo = u32;
+ else if (off == 4)
+ pThis->TestingData.Value.u64Value.s.Hi = u32;
+ else if (off == 8)
+ pThis->TestingData.Value.u32Unit = u32;
+ else
+ break;
+ pThis->offTestingData = off + 4;
+ return VINF_SUCCESS;
+ }
+ if (cb == 2)
+ {
+ if (off == 0)
+ pThis->TestingData.Value.u64Value.Words.w0 = (uint16_t)u32;
+ else if (off == 2)
+ pThis->TestingData.Value.u64Value.Words.w1 = (uint16_t)u32;
+ else if (off == 4)
+ pThis->TestingData.Value.u64Value.Words.w2 = (uint16_t)u32;
+ else if (off == 6)
+ pThis->TestingData.Value.u64Value.Words.w3 = (uint16_t)u32;
+ else if (off == 8)
+ pThis->TestingData.Value.u32Unit = (uint16_t)u32;
+ else if (off == 10)
+                            pThis->TestingData.Value.u32Unit |= u32 << 16; /* combine with the low word written at offset 8 */
+ else
+ break;
+ pThis->offTestingData = off + 2;
+ return VINF_SUCCESS;
+ }
+
+ if ( off >= 12
+ && cb == 1
+ && off - 12 < sizeof(pThis->TestingData.Value.szName) - 1)
+ {
+ if (u32)
+ {
+ pThis->TestingData.Value.szName[off - 12] = u32;
+ pThis->offTestingData = off + 1;
+ }
+ else
+ {
+#ifdef IN_RING3
+ pThis->TestingData.Value.szName[off - 12] = '\0';
+
+ RTTESTUNIT enmUnit = (RTTESTUNIT)pThis->TestingData.Value.u32Unit;
+ if (enmUnit <= RTTESTUNIT_INVALID || enmUnit >= RTTESTUNIT_END)
+ {
+ VMMDEV_TESTING_OUTPUT(("Invalid log value unit %#x\n", pThis->TestingData.Value.u32Unit));
+ enmUnit = RTTESTUNIT_NONE;
+ }
+ if (pThis->hTestingTest != NIL_RTTEST)
+ RTTestValue(pThis->hTestingTest, pThis->TestingData.Value.szName,
+ pThis->TestingData.Value.u64Value.u, enmUnit);
+
+ VMMDEV_TESTING_OUTPUT(("testing: VALUE '%s'%*s: %'9llu (%#llx) [%u]\n",
+ pThis->TestingData.Value.szName,
+ off - 12 > 48 ? 0 : 48 - (off - 12), "",
+ pThis->TestingData.Value.u64Value.u, pThis->TestingData.Value.u64Value.u,
+ pThis->TestingData.Value.u32Unit));
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ }
+ return VINF_SUCCESS;
+ }
+ break;
+
+
+ /*
+ * RTTestValue with the output from DBGFR3RegNmQuery.
+ */
+ case VMMDEV_TESTING_CMD_VALUE_REG:
+ {
+ if ( off < sizeof(pThis->TestingData.String.sz) - 1
+ && cb == 1)
+ {
+ pThis->TestingData.String.sz[off] = u32;
+ if (u32)
+ pThis->offTestingData = off + 1;
+ else
+#ifdef IN_RING3
+ vmmdevTestingCmdExec_ValueReg(pDevIns, pThis);
+#else
+ return VINF_IOM_R3_IOPORT_WRITE;
+#endif
+ return VINF_SUCCESS;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ Log(("VMMDEV_TESTING_IOPORT_CMD: bad access; cmd=%#x off=%#x cb=%#x u32=%#x\n", uCmd, off, cb, u32));
+ return VINF_SUCCESS;
+ }
+
+ default:
+ break;
+ }
+
+ return VERR_IOM_IOPORT_UNUSED;
+}
+
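+/*
+ * Illustrative guest-side sketch (not part of the device code): reporting a
+ * test value through the command and data ports handled above.  The port I/O
+ * helpers (ASMOutU32/ASMOutU8 from iprt/asm-amd64-x86.h), the example value
+ * and the value name are assumptions made for the example only:
+ *
+ *     uint64_t const uValue = UINT64_C(1048576);                        // the value being reported
+ *     ASMOutU32(VMMDEV_TESTING_IOPORT_CMD,  VMMDEV_TESTING_CMD_VALUE);
+ *     ASMOutU32(VMMDEV_TESTING_IOPORT_DATA, (uint32_t)uValue);          // low dword of the value
+ *     ASMOutU32(VMMDEV_TESTING_IOPORT_DATA, (uint32_t)(uValue >> 32));  // high dword of the value
+ *     ASMOutU32(VMMDEV_TESTING_IOPORT_DATA, RTTESTUNIT_BYTES);          // the unit (see iprt/test.h)
+ *     for (const char *psz = "my-test-value"; ; psz++)                  // the name, including the terminating zero
+ *     {
+ *         ASMOutU8(VMMDEV_TESTING_IOPORT_DATA, (uint8_t)*psz);
+ *         if (!*psz)
+ *             break;
+ *     }
+ */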
+
+/**
+ * @callback_method_impl{FNIOMIOPORTIN}
+ */
+PDMBOTHCBDECL(int) vmmdevTestingIoRead(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
+{
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ RT_NOREF_PV(pvUser);
+
+ switch (uPort)
+ {
+ /*
+ * The NOP I/O ports are used for performance measurements.
+ */
+ case VMMDEV_TESTING_IOPORT_NOP:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+ break;
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+ *pu32 = VMMDEV_TESTING_NOP_RET;
+ return VINF_SUCCESS;
+
+ case VMMDEV_TESTING_IOPORT_NOP_R3:
+ switch (cb)
+ {
+ case 4:
+ case 2:
+ case 1:
+#ifndef IN_RING3
+ return VINF_IOM_R3_IOPORT_READ;
+#else
+ *pu32 = VMMDEV_TESTING_NOP_RET;
+ return VINF_SUCCESS;
+#endif
+ default:
+ AssertFailed();
+ return VERR_INTERNAL_ERROR_2;
+ }
+
+ /*
+ * The timestamp I/O ports are obviously used for getting a good fix
+ * on the current time (as seen by the host?).
+ *
+ * The high word is latched when reading the low, so reading low + high
+ * gives you a 64-bit timestamp value.
+ */
+ case VMMDEV_TESTING_IOPORT_TS_LOW:
+ if (cb == 4)
+ {
+ uint64_t NowTS = RTTimeNanoTS();
+ *pu32 = (uint32_t)NowTS;
+ pThis->u32TestingHighTimestamp = (uint32_t)(NowTS >> 32);
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case VMMDEV_TESTING_IOPORT_TS_HIGH:
+ if (cb == 4)
+ {
+ *pu32 = pThis->u32TestingHighTimestamp;
+ return VINF_SUCCESS;
+ }
+ break;
+
+ /*
+ * The command and data registers are write-only.
+ */
+ case VMMDEV_TESTING_IOPORT_CMD:
+ case VMMDEV_TESTING_IOPORT_DATA:
+ break;
+
+ default:
+ break;
+ }
+
+ return VERR_IOM_IOPORT_UNUSED;
+}
+
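+/*
+ * Illustrative guest-side sketch (not part of the device code): reading the
+ * 64-bit nanosecond timestamp via the two ports handled above.  The high word
+ * is latched by the low word read, so the order matters.  ASMInU32 (from
+ * iprt/asm-amd64-x86.h) is assumed to be available to the guest code:
+ *
+ *     uint32_t const uLow  = ASMInU32(VMMDEV_TESTING_IOPORT_TS_LOW);
+ *     uint32_t const uHigh = ASMInU32(VMMDEV_TESTING_IOPORT_TS_HIGH);
+ *     uint64_t const uNanoTS = RT_MAKE_U64(uLow, uHigh);
+ */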
+
+#ifdef IN_RING3
+
+/**
+ * Terminates the testing part of the VMMDev if enabled.
+ *
+ * @param pDevIns The VMMDev device instance.
+ */
+void vmmdevTestingTerminate(PPDMDEVINS pDevIns)
+{
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ if (!pThis->fTestingEnabled)
+ return;
+
+ if (pThis->hTestingTest != NIL_RTTEST)
+ {
+ RTTestFailed(pThis->hTestingTest, "Still open at vmmdev destruction.");
+ RTTestSummaryAndDestroy(pThis->hTestingTest);
+ pThis->hTestingTest = NIL_RTTEST;
+ }
+}
+
+
+/**
+ * Initializes the testing part of the VMMDev if enabled.
+ *
+ * @returns VBox status code.
+ * @param pDevIns The VMMDev device instance.
+ */
+int vmmdevTestingInitialize(PPDMDEVINS pDevIns)
+{
+ VMMDevState *pThis = PDMINS_2_DATA(pDevIns, VMMDevState *);
+ int rc;
+
+ if (!pThis->fTestingEnabled)
+ return VINF_SUCCESS;
+
+ if (pThis->fTestingMMIO)
+ {
+ /*
+ * Register a chunk of MMIO memory that we'll use for various
+         * test interfaces. Optional, needs to be explicitly enabled.
+ */
+ rc = PDMDevHlpMMIORegister(pDevIns, VMMDEV_TESTING_MMIO_BASE, VMMDEV_TESTING_MMIO_SIZE, NULL /*pvUser*/,
+ IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
+ vmmdevTestingMmioWrite, vmmdevTestingMmioRead, "VMMDev Testing");
+ AssertRCReturn(rc, rc);
+ if (pThis->fRZEnabled)
+ {
+ rc = PDMDevHlpMMIORegisterR0(pDevIns, VMMDEV_TESTING_MMIO_BASE, VMMDEV_TESTING_MMIO_SIZE, NIL_RTR0PTR /*pvUser*/,
+ "vmmdevTestingMmioWrite", "vmmdevTestingMmioRead");
+ AssertRCReturn(rc, rc);
+ rc = PDMDevHlpMMIORegisterRC(pDevIns, VMMDEV_TESTING_MMIO_BASE, VMMDEV_TESTING_MMIO_SIZE, NIL_RTRCPTR /*pvUser*/,
+ "vmmdevTestingMmioWrite", "vmmdevTestingMmioRead");
+ AssertRCReturn(rc, rc);
+ }
+ }
+
+
+ /*
+ * Register the I/O ports used for testing.
+ */
+ rc = PDMDevHlpIOPortRegister(pDevIns, VMMDEV_TESTING_IOPORT_BASE, VMMDEV_TESTING_IOPORT_COUNT, NULL,
+ vmmdevTestingIoWrite,
+ vmmdevTestingIoRead,
+ NULL /*pfnOutStr*/,
+ NULL /*pfnInStr*/,
+ "VMMDev Testing");
+ AssertRCReturn(rc, rc);
+ if (pThis->fRZEnabled)
+ {
+ rc = PDMDevHlpIOPortRegisterR0(pDevIns, VMMDEV_TESTING_IOPORT_BASE, VMMDEV_TESTING_IOPORT_COUNT, NIL_RTR0PTR /*pvUser*/,
+ "vmmdevTestingIoWrite",
+ "vmmdevTestingIoRead",
+ NULL /*pszOutStr*/,
+ NULL /*pszInStr*/,
+ "VMMDev Testing");
+ AssertRCReturn(rc, rc);
+ rc = PDMDevHlpIOPortRegisterRC(pDevIns, VMMDEV_TESTING_IOPORT_BASE, VMMDEV_TESTING_IOPORT_COUNT, NIL_RTRCPTR /*pvUser*/,
+ "vmmdevTestingIoWrite",
+ "vmmdevTestingIoRead",
+ NULL /*pszOutStr*/,
+ NULL /*pszInStr*/,
+ "VMMDev Testing");
+ AssertRCReturn(rc, rc);
+ }
+
+ /*
+     * Open the XML output file(/pipe/whatever) if specified.
+ */
+ rc = RTTestCreateEx("VMMDevTesting", RTTEST_C_USE_ENV | RTTEST_C_NO_TLS | RTTEST_C_XML_DELAY_TOP_TEST,
+ RTTESTLVL_INVALID, -1 /*iNativeTestPipe*/, pThis->pszTestingXmlOutput, &pThis->hTestingTest);
+ if (RT_FAILURE(rc))
+ return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, "Error creating testing instance");
+
+ return VINF_SUCCESS;
+}
+
+#endif /* IN_RING3 */
+#endif /* !VBOX_WITHOUT_TESTING_FEATURES */
+
diff --git a/src/VBox/Devices/VMMDev/VMMDevTesting.h b/src/VBox/Devices/VMMDev/VMMDevTesting.h
new file mode 100644
index 00000000..1fb6fee0
--- /dev/null
+++ b/src/VBox/Devices/VMMDev/VMMDevTesting.h
@@ -0,0 +1,35 @@
+/* $Id: VMMDevTesting.h $ */
+/** @file
+ * VMMDev - Testing Extensions, internal header.
+ */
+
+/*
+ * Copyright (C) 2010-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h
+#define VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/types.h>
+#include <VBox/VMMDevTesting.h>
+
+RT_C_DECLS_BEGIN
+
+int vmmdevTestingInitialize(PPDMDEVINS pDevIns);
+void vmmdevTestingTerminate(PPDMDEVINS pDevIns);
+
+RT_C_DECLS_END
+
+#endif /* !VBOX_INCLUDED_SRC_VMMDev_VMMDevTesting_h */
+